Nov 11 13:29:48 crc systemd[1]: Starting Kubernetes Kubelet... Nov 11 13:29:48 crc restorecon[4692]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc 
restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 11 13:29:48 crc 
restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc 
restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc 
restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 
crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 
13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 
13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 11 13:29:48 crc 
restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 
13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:48 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 
13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc 
restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 11 13:29:49 crc restorecon[4692]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 11 13:29:49 crc kubenswrapper[4842]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 11 13:29:49 crc kubenswrapper[4842]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 11 13:29:49 crc kubenswrapper[4842]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 11 13:29:49 crc kubenswrapper[4842]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 11 13:29:49 crc kubenswrapper[4842]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 11 13:29:49 crc kubenswrapper[4842]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.780180 4842 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787179 4842 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787220 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787235 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787247 4842 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787259 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787268 4842 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787277 4842 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787286 4842 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787295 4842 feature_gate.go:330] unrecognized feature gate: Example Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787303 4842 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787311 4842 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787320 4842 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787328 4842 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787338 4842 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787347 4842 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787356 4842 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787364 4842 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787372 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787380 4842 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787388 4842 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787396 4842 feature_gate.go:330] unrecognized 
feature gate: SignatureStores Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787408 4842 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787418 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787428 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787437 4842 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787444 4842 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787453 4842 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787461 4842 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787468 4842 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787476 4842 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787484 4842 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787492 4842 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787512 4842 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787520 4842 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787528 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787537 4842 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787545 4842 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787553 4842 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787561 4842 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787568 4842 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787576 4842 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787584 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787592 4842 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787600 4842 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787607 4842 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787615 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 11 13:29:49 crc 
kubenswrapper[4842]: W1111 13:29:49.787623 4842 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787634 4842 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787642 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787650 4842 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787658 4842 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787666 4842 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787675 4842 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787683 4842 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787691 4842 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787699 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787706 4842 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787714 4842 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787722 4842 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787729 4842 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787737 4842 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787748 4842 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787758 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787767 4842 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787775 4842 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787783 4842 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787791 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787801 4842 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787810 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787823 4842 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.787835 4842 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788008 4842 flags.go:64] FLAG: --address="0.0.0.0" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788025 4842 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788039 4842 flags.go:64] FLAG: --anonymous-auth="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788051 4842 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788062 4842 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788072 4842 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788084 4842 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788129 4842 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788142 4842 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788153 4842 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788164 4842 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788174 4842 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788183 4842 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788193 4842 flags.go:64] FLAG: --cgroup-root="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788202 4842 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788211 4842 flags.go:64] FLAG: --client-ca-file="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788220 4842 flags.go:64] FLAG: --cloud-config="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788228 4842 flags.go:64] FLAG: --cloud-provider="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788237 4842 flags.go:64] FLAG: --cluster-dns="[]" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788257 4842 flags.go:64] FLAG: --cluster-domain="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788266 4842 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788275 4842 flags.go:64] FLAG: --config-dir="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788284 4842 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788295 4842 flags.go:64] FLAG: --container-log-max-files="5" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788307 4842 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788317 4842 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788327 4842 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788336 4842 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 11 13:29:49 crc 
kubenswrapper[4842]: I1111 13:29:49.788346 4842 flags.go:64] FLAG: --contention-profiling="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788355 4842 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788365 4842 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788374 4842 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788384 4842 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788397 4842 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788407 4842 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788416 4842 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788428 4842 flags.go:64] FLAG: --enable-load-reader="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788437 4842 flags.go:64] FLAG: --enable-server="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788448 4842 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788460 4842 flags.go:64] FLAG: --event-burst="100" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788471 4842 flags.go:64] FLAG: --event-qps="50" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788481 4842 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788490 4842 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788501 4842 flags.go:64] FLAG: --eviction-hard="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788521 4842 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788531 4842 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788540 4842 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788549 4842 flags.go:64] FLAG: --eviction-soft="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788558 4842 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788567 4842 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788576 4842 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788586 4842 flags.go:64] FLAG: --experimental-mounter-path="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788595 4842 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788604 4842 flags.go:64] FLAG: --fail-swap-on="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788613 4842 flags.go:64] FLAG: --feature-gates="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788625 4842 flags.go:64] FLAG: --file-check-frequency="20s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788634 4842 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788644 4842 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 11 13:29:49 crc 
kubenswrapper[4842]: I1111 13:29:49.788653 4842 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788662 4842 flags.go:64] FLAG: --healthz-port="10248" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788672 4842 flags.go:64] FLAG: --help="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788681 4842 flags.go:64] FLAG: --hostname-override="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788690 4842 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788699 4842 flags.go:64] FLAG: --http-check-frequency="20s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788708 4842 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788717 4842 flags.go:64] FLAG: --image-credential-provider-config="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788726 4842 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788735 4842 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788744 4842 flags.go:64] FLAG: --image-service-endpoint="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788753 4842 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788762 4842 flags.go:64] FLAG: --kube-api-burst="100" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788772 4842 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788781 4842 flags.go:64] FLAG: --kube-api-qps="50" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788790 4842 flags.go:64] FLAG: --kube-reserved="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788799 4842 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788808 4842 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788818 4842 flags.go:64] FLAG: --kubelet-cgroups="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788827 4842 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788836 4842 flags.go:64] FLAG: --lock-file="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788846 4842 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788855 4842 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788864 4842 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788878 4842 flags.go:64] FLAG: --log-json-split-stream="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788887 4842 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788896 4842 flags.go:64] FLAG: --log-text-split-stream="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788905 4842 flags.go:64] FLAG: --logging-format="text" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788913 4842 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788924 4842 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 11 13:29:49 crc 
kubenswrapper[4842]: I1111 13:29:49.788933 4842 flags.go:64] FLAG: --manifest-url="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788942 4842 flags.go:64] FLAG: --manifest-url-header="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788954 4842 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788963 4842 flags.go:64] FLAG: --max-open-files="1000000" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788974 4842 flags.go:64] FLAG: --max-pods="110" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788984 4842 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.788993 4842 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789002 4842 flags.go:64] FLAG: --memory-manager-policy="None" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789010 4842 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789020 4842 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789029 4842 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789039 4842 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789060 4842 flags.go:64] FLAG: --node-status-max-images="50" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789070 4842 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789080 4842 flags.go:64] FLAG: --oom-score-adj="-999" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789089 4842 flags.go:64] FLAG: --pod-cidr="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789121 4842 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789136 4842 flags.go:64] FLAG: --pod-manifest-path="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789145 4842 flags.go:64] FLAG: --pod-max-pids="-1" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789154 4842 flags.go:64] FLAG: --pods-per-core="0" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789163 4842 flags.go:64] FLAG: --port="10250" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789173 4842 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789182 4842 flags.go:64] FLAG: --provider-id="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789191 4842 flags.go:64] FLAG: --qos-reserved="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789200 4842 flags.go:64] FLAG: --read-only-port="10255" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789209 4842 flags.go:64] FLAG: --register-node="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789218 4842 flags.go:64] FLAG: --register-schedulable="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789230 4842 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789245 4842 flags.go:64] FLAG: --registry-burst="10" Nov 11 13:29:49 crc 
kubenswrapper[4842]: I1111 13:29:49.789256 4842 flags.go:64] FLAG: --registry-qps="5" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789265 4842 flags.go:64] FLAG: --reserved-cpus="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789274 4842 flags.go:64] FLAG: --reserved-memory="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789320 4842 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789331 4842 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789342 4842 flags.go:64] FLAG: --rotate-certificates="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789351 4842 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789360 4842 flags.go:64] FLAG: --runonce="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789369 4842 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789378 4842 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789388 4842 flags.go:64] FLAG: --seccomp-default="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789397 4842 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789406 4842 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789416 4842 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789425 4842 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789435 4842 flags.go:64] FLAG: --storage-driver-password="root" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789444 4842 flags.go:64] FLAG: --storage-driver-secure="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789453 4842 flags.go:64] FLAG: --storage-driver-table="stats" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789463 4842 flags.go:64] FLAG: --storage-driver-user="root" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789473 4842 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789482 4842 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789492 4842 flags.go:64] FLAG: --system-cgroups="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789501 4842 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789516 4842 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789525 4842 flags.go:64] FLAG: --tls-cert-file="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789535 4842 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789547 4842 flags.go:64] FLAG: --tls-min-version="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789556 4842 flags.go:64] FLAG: --tls-private-key-file="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789566 4842 flags.go:64] FLAG: --topology-manager-policy="none" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789578 4842 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 11 13:29:49 crc kubenswrapper[4842]: 
I1111 13:29:49.789589 4842 flags.go:64] FLAG: --topology-manager-scope="container" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789601 4842 flags.go:64] FLAG: --v="2" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789618 4842 flags.go:64] FLAG: --version="false" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789635 4842 flags.go:64] FLAG: --vmodule="" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789650 4842 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.789664 4842 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789882 4842 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789893 4842 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789902 4842 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789910 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789918 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789926 4842 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789934 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789942 4842 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789949 4842 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789958 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789966 4842 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789974 4842 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789984 4842 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.789993 4842 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790002 4842 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790009 4842 feature_gate.go:330] unrecognized feature gate: Example Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790020 4842 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790030 4842 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790040 4842 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790050 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790059 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790069 4842 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790079 4842 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790087 4842 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790094 4842 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790129 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790137 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790145 4842 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790153 4842 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790161 4842 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790168 4842 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790176 4842 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790184 4842 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790192 4842 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790202 4842 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790211 4842 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790219 4842 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790227 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790235 4842 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790243 4842 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790251 4842 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790258 4842 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790266 4842 feature_gate.go:330] unrecognized feature 
gate: BuildCSIVolumes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790275 4842 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790283 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790291 4842 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790298 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790312 4842 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790320 4842 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790328 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790336 4842 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790344 4842 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790352 4842 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790360 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790368 4842 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790375 4842 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790383 4842 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790391 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790399 4842 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790407 4842 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790415 4842 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790422 4842 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790430 4842 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790437 4842 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790445 4842 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790456 4842 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790466 4842 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790475 4842 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790484 4842 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790493 4842 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.790504 4842 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.790531 4842 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.806242 4842 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.806360 4842 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806534 4842 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806557 4842 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806570 4842 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806584 4842 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806602 4842 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806617 4842 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806630 4842 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806642 4842 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806655 4842 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806670 4842 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806684 4842 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806697 4842 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806709 4842 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806720 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806732 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806744 4842 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806755 4842 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806765 4842 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806777 4842 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806790 4842 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806802 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806813 4842 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806826 4842 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806837 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806848 4842 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806863 4842 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806874 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806884 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806895 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806906 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806917 4842 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806927 4842 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806938 4842 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806949 4842 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806961 4842 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806972 4842 feature_gate.go:330] unrecognized feature gate: 
MachineAPIMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806982 4842 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.806991 4842 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807000 4842 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807008 4842 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807016 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807026 4842 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807034 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807042 4842 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807051 4842 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807130 4842 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807141 4842 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807150 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807158 4842 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807166 4842 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807175 4842 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807185 4842 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807197 4842 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807209 4842 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807223 4842 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807233 4842 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807243 4842 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807252 4842 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807261 4842 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807269 4842 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807277 4842 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807290 4842 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807299 4842 feature_gate.go:330] unrecognized feature gate: Example Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807307 4842 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807318 4842 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807327 4842 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807336 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807345 4842 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807353 4842 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807363 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807371 4842 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.807386 4842 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807688 4842 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807710 4842 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807722 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807732 4842 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807747 4842 feature_gate.go:353] 
Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807763 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807777 4842 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807788 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807799 4842 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807811 4842 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807822 4842 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807834 4842 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807846 4842 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807857 4842 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807868 4842 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807879 4842 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807890 4842 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807900 4842 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807911 4842 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807921 4842 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807933 4842 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807943 4842 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807954 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807966 4842 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807977 4842 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.807994 4842 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808009 4842 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808020 4842 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808030 4842 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808041 4842 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808051 4842 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808061 4842 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808072 4842 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808083 4842 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808093 4842 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808138 4842 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808149 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808159 4842 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808169 4842 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808179 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808190 4842 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808201 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808212 4842 feature_gate.go:330] unrecognized feature gate: Example Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808226 4842 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808237 4842 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808247 4842 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808260 4842 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808270 4842 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808282 4842 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808294 4842 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808305 4842 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808316 4842 feature_gate.go:330] unrecognized feature 
gate: ClusterAPIInstallIBMCloud Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808326 4842 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808334 4842 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808343 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808354 4842 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808362 4842 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808371 4842 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808383 4842 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808393 4842 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808405 4842 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808418 4842 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808430 4842 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808443 4842 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808454 4842 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808465 4842 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808476 4842 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808489 4842 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808498 4842 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808507 4842 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.808516 4842 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.808530 4842 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.808832 4842 server.go:940] "Client rotation is on, will bootstrap in background" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.818077 4842 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.818362 4842 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.821061 4842 server.go:997] "Starting client certificate rotation" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.821141 4842 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.821327 4842 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-19 21:07:39.39391266 +0000 UTC Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.821426 4842 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 199h37m49.572488973s for next certificate rotation Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.850052 4842 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.853546 4842 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.879348 4842 log.go:25] "Validated CRI v1 runtime API" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.922468 4842 log.go:25] "Validated CRI v1 image API" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.925812 4842 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.933923 4842 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-11-13-25-05-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.934038 4842 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 
fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:45 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}] Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.954511 4842 manager.go:217] Machine: {Timestamp:2025-11-11 13:29:49.950801065 +0000 UTC m=+0.611090724 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:8b08dd54-690d-45ec-9438-693834fc9d7e BootID:9ac0780f-86e7-49c9-975f-c53d42cb190a Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:45 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:8d:9a:23 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:8d:9a:23 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:4e:92:81 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:c4:79:28 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:29:74:bb Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:f3:9e:d1 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:9a:2f:08:70:ac:ec Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:36:f1:30:ef:34:2c Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 
Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.954838 4842 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.955232 4842 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.957019 4842 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.957253 4842 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.957303 4842 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.957580 4842 topology_manager.go:138] "Creating topology manager with none policy" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.957594 4842 container_manager_linux.go:303] "Creating device plugin manager" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.958228 4842 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.958266 4842 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.958469 4842 state_mem.go:36] "Initialized new in-memory state store" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.958575 4842 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.963582 4842 kubelet.go:418] "Attempting to sync node with API server" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.963617 4842 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" 
Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.963648 4842 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.963666 4842 kubelet.go:324] "Adding apiserver pod source" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.963687 4842 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.970498 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.970528 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.970678 4842 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 11 13:29:49 crc kubenswrapper[4842]: E1111 13:29:49.970691 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:49 crc kubenswrapper[4842]: E1111 13:29:49.970728 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.971586 4842 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.974379 4842 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976292 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976352 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976376 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976395 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976424 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976442 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976460 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976493 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976517 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976535 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976561 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.976581 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.978049 4842 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.978946 4842 server.go:1280] "Started kubelet" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.979877 4842 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.980208 4842 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.980957 4842 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:49 crc systemd[1]: Started Kubernetes Kubelet. 
Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.981866 4842 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.987330 4842 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.987387 4842 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.987881 4842 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 10:31:29.323236203 +0000 UTC Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.987968 4842 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1029h1m39.335276812s for next certificate rotation Nov 11 13:29:49 crc kubenswrapper[4842]: E1111 13:29:49.988934 4842 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.988998 4842 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.989007 4842 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.989184 4842 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 11 13:29:49 crc kubenswrapper[4842]: W1111 13:29:49.991063 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:49 crc kubenswrapper[4842]: E1111 13:29:49.991187 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.992421 4842 factory.go:55] Registering systemd factory Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.992456 4842 factory.go:221] Registration of the systemd container factory successfully Nov 11 13:29:49 crc kubenswrapper[4842]: E1111 13:29:49.992814 4842 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.155:6443: connect: connection refused" interval="200ms" Nov 11 13:29:49 crc kubenswrapper[4842]: E1111 13:29:49.990969 4842 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.155:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.1876f6ea37ab6aec default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-11 13:29:49.97888894 +0000 UTC m=+0.639178609,LastTimestamp:2025-11-11 13:29:49.97888894 +0000 UTC m=+0.639178609,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 
+0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.995648 4842 factory.go:153] Registering CRI-O factory Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.995716 4842 factory.go:221] Registration of the crio container factory successfully Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.995884 4842 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.995983 4842 factory.go:103] Registering Raw factory Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.996027 4842 manager.go:1196] Started watching for new ooms in manager Nov 11 13:29:49 crc kubenswrapper[4842]: I1111 13:29:49.996724 4842 server.go:460] "Adding debug handlers to kubelet server" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.000182 4842 manager.go:319] Starting recovery of all containers Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009397 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009515 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009552 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009581 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009612 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009641 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009670 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009690 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009729 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009753 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009773 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009797 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009828 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009861 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009882 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009904 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009963 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.009988 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010010 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010038 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010059 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010079 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010127 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010150 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010172 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010193 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010221 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010244 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010272 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010292 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010324 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010346 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010366 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010390 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010413 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010434 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010457 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010479 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010544 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010568 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010596 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010625 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010652 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010679 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010715 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010746 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010773 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010801 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010830 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010860 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010887 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010914 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010956 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.010987 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011048 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011079 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011144 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011176 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011203 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011231 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011263 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011292 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011319 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011346 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011372 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011443 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011472 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011500 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011531 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011563 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011594 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011623 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011654 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011685 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011715 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011745 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011779 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011807 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011837 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011865 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011893 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011923 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011954 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.011987 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012015 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012045 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012074 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012145 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012181 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012209 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012237 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012264 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012299 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012327 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012354 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012382 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012414 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012445 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012479 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012513 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012545 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012575 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012604 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012633 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012686 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012816 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012848 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012880 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012914 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012943 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.012971 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013003 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013033 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013062 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013092 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013163 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013191 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013224 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013252 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013292 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013316 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013337 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013358 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013380 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013404 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013425 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013446 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013466 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013485 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013505 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013525 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013544 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013581 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013608 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013635 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013661 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013690 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013717 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013743 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013764 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013787 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013807 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013826 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013846 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013867 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013889 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013909 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013930 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013950 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013970 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.013992 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014011 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014034 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014054 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014077 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014141 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014174 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014199 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014231 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014264 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014295 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014317 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" 
volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014344 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014449 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014486 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014531 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014561 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014593 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014628 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014655 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014683 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014713 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014742 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014770 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014801 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014829 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014854 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014885 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014916 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014946 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.014977 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.015036 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.015064 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.015092 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.015148 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017243 4842 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017317 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017344 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017368 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017394 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017414 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017433 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017461 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017486 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017528 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017550 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017569 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017590 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017610 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017629 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017653 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017673 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017698 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017711 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017728 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017741 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017754 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017767 4842 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017780 4842 reconstruct.go:97] "Volume reconstruction finished" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.017789 4842 reconciler.go:26] "Reconciler: start to sync state" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.029487 4842 manager.go:324] Recovery completed Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.049631 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.051797 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.051832 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.051842 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.053591 4842 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.053616 4842 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.053645 4842 state_mem.go:36] "Initialized new in-memory state store" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.055087 4842 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.057406 4842 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.057502 4842 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.057748 4842 kubelet.go:2335] "Starting kubelet main sync loop" Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.057848 4842 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 11 13:29:50 crc kubenswrapper[4842]: W1111 13:29:50.058755 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.058838 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.073659 4842 policy_none.go:49] "None policy: Start" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.074688 4842 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.074721 4842 state_mem.go:35] "Initializing new in-memory state store" Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.089962 4842 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.141521 4842 manager.go:334] "Starting Device Plugin manager" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.141585 4842 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.141601 4842 server.go:79] "Starting device plugin registration server" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.142266 4842 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.142285 4842 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.142572 4842 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.142677 4842 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.142686 4842 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.150866 4842 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.158249 4842 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Nov 11 13:29:50 crc kubenswrapper[4842]: 
I1111 13:29:50.158428 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.160831 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.161002 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.161020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.161230 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.161469 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.161545 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.162551 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.162590 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.162603 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.162765 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.162889 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.162960 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164052 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164065 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164082 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164117 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164121 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164141 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164704 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164747 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.164960 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.165140 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.165185 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166033 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166062 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166079 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166277 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166464 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166496 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166510 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166541 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.166601 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167143 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167164 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167195 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167396 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167421 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167423 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.167474 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.168489 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.168527 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.168541 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.194685 4842 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.155:6443: connect: connection refused" interval="400ms" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.220852 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.220891 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.220910 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.220929 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.220946 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.220961 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221084 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221181 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221231 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221277 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221320 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221343 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221367 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221388 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.221411 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.242576 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.243934 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.243984 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.244000 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.244031 4842 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.244530 4842 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.155:6443: connect: connection refused" node="crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.322733 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.322830 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.322868 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.322928 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.322957 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.322979 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") 
pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323004 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323025 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323049 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323071 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323135 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323156 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323178 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323199 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323219 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323828 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod 
\"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.323970 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324025 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324044 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324084 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324146 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324166 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324141 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324044 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324063 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324233 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324298 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324333 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324356 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.324585 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.445526 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.447613 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.447672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.447696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.447747 4842 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.448486 4842 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.155:6443: connect: connection refused" node="crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.507967 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.532574 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.547832 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: W1111 13:29:50.556405 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-99915373c18576802daa08b6061f277867ff64028eea5b4bfb1787fab40ac86f WatchSource:0}: Error finding container 99915373c18576802daa08b6061f277867ff64028eea5b4bfb1787fab40ac86f: Status 404 returned error can't find the container with id 99915373c18576802daa08b6061f277867ff64028eea5b4bfb1787fab40ac86f Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.559983 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.566167 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 11 13:29:50 crc kubenswrapper[4842]: W1111 13:29:50.581813 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-5b7cc64cffc00ef3785fbd1953db574a7afb60826b25be39df9f21cdfe66b15a WatchSource:0}: Error finding container 5b7cc64cffc00ef3785fbd1953db574a7afb60826b25be39df9f21cdfe66b15a: Status 404 returned error can't find the container with id 5b7cc64cffc00ef3785fbd1953db574a7afb60826b25be39df9f21cdfe66b15a Nov 11 13:29:50 crc kubenswrapper[4842]: W1111 13:29:50.586215 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-4057883b7950bd0a75638aeaa1291309f1c2592c5d8b744f1684531256030a00 WatchSource:0}: Error finding container 4057883b7950bd0a75638aeaa1291309f1c2592c5d8b744f1684531256030a00: Status 404 returned error can't find the container with id 4057883b7950bd0a75638aeaa1291309f1c2592c5d8b744f1684531256030a00 Nov 11 13:29:50 crc kubenswrapper[4842]: W1111 13:29:50.593224 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-2fdf097bfb560c6bf174ed0001dc86cb6dd4ff2c1070c2533f71cf0508a320b2 WatchSource:0}: Error finding container 2fdf097bfb560c6bf174ed0001dc86cb6dd4ff2c1070c2533f71cf0508a320b2: Status 404 returned error can't find the container with id 2fdf097bfb560c6bf174ed0001dc86cb6dd4ff2c1070c2533f71cf0508a320b2 Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.595498 4842 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.155:6443: connect: connection refused" interval="800ms" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.849318 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.851247 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.851300 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.851317 4842 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.851357 4842 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 11 13:29:50 crc kubenswrapper[4842]: E1111 13:29:50.851844 4842 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.155:6443: connect: connection refused" node="crc" Nov 11 13:29:50 crc kubenswrapper[4842]: I1111 13:29:50.982410 4842 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.062892 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"4057883b7950bd0a75638aeaa1291309f1c2592c5d8b744f1684531256030a00"} Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.063855 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5b7cc64cffc00ef3785fbd1953db574a7afb60826b25be39df9f21cdfe66b15a"} Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.065117 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"85cbac299971f1ba178a80c230900fc2ee8ab7f90a5f15b26de6c0f68aee534a"} Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.066397 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"99915373c18576802daa08b6061f277867ff64028eea5b4bfb1787fab40ac86f"} Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.067325 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2fdf097bfb560c6bf174ed0001dc86cb6dd4ff2c1070c2533f71cf0508a320b2"} Nov 11 13:29:51 crc kubenswrapper[4842]: W1111 13:29:51.073017 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:51 crc kubenswrapper[4842]: E1111 13:29:51.073079 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:51 crc kubenswrapper[4842]: W1111 13:29:51.089995 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:51 crc kubenswrapper[4842]: E1111 13:29:51.090067 4842 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:51 crc kubenswrapper[4842]: E1111 13:29:51.397001 4842 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.155:6443: connect: connection refused" interval="1.6s" Nov 11 13:29:51 crc kubenswrapper[4842]: W1111 13:29:51.533602 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:51 crc kubenswrapper[4842]: E1111 13:29:51.533707 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:51 crc kubenswrapper[4842]: W1111 13:29:51.534058 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:51 crc kubenswrapper[4842]: E1111 13:29:51.534231 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.652087 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.653640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.653685 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.653700 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.653728 4842 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 11 13:29:51 crc kubenswrapper[4842]: E1111 13:29:51.654183 4842 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.155:6443: connect: connection refused" node="crc" Nov 11 13:29:51 crc kubenswrapper[4842]: I1111 13:29:51.982195 4842 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 
13:29:52.071778 4842 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09" exitCode=0 Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.071877 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.071951 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.073513 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.073570 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.073593 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.074168 4842 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6" exitCode=0 Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.074239 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.074323 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.076525 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.076582 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.076596 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.077585 4842 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b" exitCode=0 Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.077674 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.077814 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.079159 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.079218 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 
13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.079245 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.081299 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.081677 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.081748 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.081763 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.081777 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.081816 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.082323 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.082360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.082375 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.083178 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.083213 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.083223 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.083887 4842 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="4ce9020ffee58dc5fb5c638ac437ed5af506e3e39ffd57dae97b1845f0aca703" exitCode=0 Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.083937 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"4ce9020ffee58dc5fb5c638ac437ed5af506e3e39ffd57dae97b1845f0aca703"} Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.083983 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.084617 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.084645 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.084657 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:52 crc kubenswrapper[4842]: I1111 13:29:52.982222 4842 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:53 crc kubenswrapper[4842]: E1111 13:29:53.000167 4842 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.155:6443: connect: connection refused" interval="3.2s" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.089004 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.089027 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.089809 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.089930 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.089995 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.091366 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.091393 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.091405 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.091423 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.092215 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 
13:29:53.092247 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.092258 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.094569 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"df03f17d23bf6355100b5b75db5b9fbbeb8c8646530f1a4984e43e3c5d510c8c"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.094599 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.094614 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.094623 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.094633 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.094708 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.095361 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.095389 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.095402 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.096753 4842 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ef23368107965021b2ba73c31caab0e6fd77f2c16fd4574f80c4f5bc2c4a2e2a" exitCode=0 Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.096839 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.097195 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.097450 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ef23368107965021b2ba73c31caab0e6fd77f2c16fd4574f80c4f5bc2c4a2e2a"} Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.097727 4842 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.097751 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.097763 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.098358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.098404 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.098419 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.254730 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.257939 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.257974 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.257985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.258011 4842 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 11 13:29:53 crc kubenswrapper[4842]: E1111 13:29:53.258549 4842 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.155:6443: connect: connection refused" node="crc" Nov 11 13:29:53 crc kubenswrapper[4842]: W1111 13:29:53.504134 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:53 crc kubenswrapper[4842]: E1111 13:29:53.504239 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:53 crc kubenswrapper[4842]: W1111 13:29:53.511327 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:53 crc kubenswrapper[4842]: E1111 13:29:53.511395 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:53 crc kubenswrapper[4842]: W1111 13:29:53.517575 4842 
reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.155:6443: connect: connection refused Nov 11 13:29:53 crc kubenswrapper[4842]: E1111 13:29:53.517671 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.155:6443: connect: connection refused" logger="UnhandledError" Nov 11 13:29:53 crc kubenswrapper[4842]: I1111 13:29:53.999842 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.101235 4842 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="eac2e8d4d90c6372e5905dd67441e7a8d78a142497c877025b35b44183cd2c71" exitCode=0 Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.101393 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.101435 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.101939 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"eac2e8d4d90c6372e5905dd67441e7a8d78a142497c877025b35b44183cd2c71"} Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.102009 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.102081 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.102162 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.102193 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.102161 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103160 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103182 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103188 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103196 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103199 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103204 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103227 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103233 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103248 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103259 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103251 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103290 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103182 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103322 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:54 crc kubenswrapper[4842]: I1111 13:29:54.103329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.106758 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ffba1449f76f1a62b603e5462c3a4e3eaba2edc0657b36abce6620d062d8fbeb"} Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.106800 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.106822 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.106804 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"76a9779f402c0a328fdbab4f2d0bda1c3a7f3b95add2e0ff108c0c0ff8ece44b"} Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.106890 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"963c0a3e482d4b8cba9a61f052a6527283596313f6daf0ca8de563368f0fc1ce"} Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.106917 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"088a6b389b0e139f6bc54f1e4d9cdf3bc9f539f55ddc055e034c7eb6f02e7398"} Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.106930 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8d7e8c594ea00e9f0e51639f5097b0e13235fdb16511541b5b08b9791624fe9a"} Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.107799 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.107808 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.107826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.107830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.107836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.107844 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.768219 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.768365 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.768398 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.769597 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.769658 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:55 crc kubenswrapper[4842]: I1111 13:29:55.769667 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.012599 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.110377 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.111872 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.111911 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.111921 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.355831 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.356667 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.356766 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.358427 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.358463 4842 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.358497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.459129 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.460550 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.460590 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.460600 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:56 crc kubenswrapper[4842]: I1111 13:29:56.460638 4842 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 11 13:29:57 crc kubenswrapper[4842]: I1111 13:29:57.112963 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:57 crc kubenswrapper[4842]: I1111 13:29:57.113862 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:57 crc kubenswrapper[4842]: I1111 13:29:57.113913 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:57 crc kubenswrapper[4842]: I1111 13:29:57.113931 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.444369 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.444541 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.446830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.446866 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.446877 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.452000 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.460355 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.460537 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.462130 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.462407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.462650 4842 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.618678 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.850239 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.920575 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.920824 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.922297 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.922425 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:58 crc kubenswrapper[4842]: I1111 13:29:58.922517 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:29:59 crc kubenswrapper[4842]: I1111 13:29:59.117800 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:29:59 crc kubenswrapper[4842]: I1111 13:29:59.118575 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:29:59 crc kubenswrapper[4842]: I1111 13:29:59.118597 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:29:59 crc kubenswrapper[4842]: I1111 13:29:59.118605 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:00 crc kubenswrapper[4842]: I1111 13:30:00.121162 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:00 crc kubenswrapper[4842]: I1111 13:30:00.122364 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:00 crc kubenswrapper[4842]: I1111 13:30:00.122437 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:00 crc kubenswrapper[4842]: I1111 13:30:00.122451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:00 crc kubenswrapper[4842]: E1111 13:30:00.151037 4842 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 11 13:30:01 crc kubenswrapper[4842]: I1111 13:30:01.850748 4842 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 11 13:30:01 crc kubenswrapper[4842]: I1111 13:30:01.850835 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" 
containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 11 13:30:03 crc kubenswrapper[4842]: I1111 13:30:03.983185 4842 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.131287 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.132645 4842 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="df03f17d23bf6355100b5b75db5b9fbbeb8c8646530f1a4984e43e3c5d510c8c" exitCode=255 Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.132695 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"df03f17d23bf6355100b5b75db5b9fbbeb8c8646530f1a4984e43e3c5d510c8c"} Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.132838 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.133793 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.133826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.133837 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.134431 4842 scope.go:117] "RemoveContainer" containerID="df03f17d23bf6355100b5b75db5b9fbbeb8c8646530f1a4984e43e3c5d510c8c" Nov 11 13:30:04 crc kubenswrapper[4842]: W1111 13:30:04.326968 4842 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.327060 4842 trace.go:236] Trace[517527018]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Nov-2025 13:29:54.325) (total time: 10001ms): Nov 11 13:30:04 crc kubenswrapper[4842]: Trace[517527018]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:30:04.326) Nov 11 13:30:04 crc kubenswrapper[4842]: Trace[517527018]: [10.00138436s] [10.00138436s] END Nov 11 13:30:04 crc kubenswrapper[4842]: E1111 13:30:04.327082 4842 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.458833 4842 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver 
namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.458947 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.465272 4842 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 11 13:30:04 crc kubenswrapper[4842]: I1111 13:30:04.465337 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 11 13:30:05 crc kubenswrapper[4842]: I1111 13:30:05.136976 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 11 13:30:05 crc kubenswrapper[4842]: I1111 13:30:05.138671 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec"} Nov 11 13:30:05 crc kubenswrapper[4842]: I1111 13:30:05.138862 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:05 crc kubenswrapper[4842]: I1111 13:30:05.139644 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:05 crc kubenswrapper[4842]: I1111 13:30:05.139670 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:05 crc kubenswrapper[4842]: I1111 13:30:05.139681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.042831 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.043051 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.044449 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.044514 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.044534 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.055199 
4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.142204 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.143284 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.143338 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.143351 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.361401 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.361620 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.361952 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.363358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.363409 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.363428 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:06 crc kubenswrapper[4842]: I1111 13:30:06.365977 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:30:07 crc kubenswrapper[4842]: I1111 13:30:07.145612 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:07 crc kubenswrapper[4842]: I1111 13:30:07.147398 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:07 crc kubenswrapper[4842]: I1111 13:30:07.147448 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:07 crc kubenswrapper[4842]: I1111 13:30:07.147464 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.147876 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.149581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.149616 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.149627 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.622990 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.623145 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.624193 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.624279 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.624293 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:08 crc kubenswrapper[4842]: I1111 13:30:08.936641 4842 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 11 13:30:09 crc kubenswrapper[4842]: E1111 13:30:09.461137 4842 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.462777 4842 trace.go:236] Trace[1939944862]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Nov-2025 13:29:58.453) (total time: 11009ms): Nov 11 13:30:09 crc kubenswrapper[4842]: Trace[1939944862]: ---"Objects listed" error: 11008ms (13:30:09.462) Nov 11 13:30:09 crc kubenswrapper[4842]: Trace[1939944862]: [11.009003286s] [11.009003286s] END Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.462818 4842 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.464416 4842 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.464566 4842 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.465906 4842 trace.go:236] Trace[236305515]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Nov-2025 13:29:57.694) (total time: 11771ms): Nov 11 13:30:09 crc kubenswrapper[4842]: Trace[236305515]: ---"Objects listed" error: 11771ms (13:30:09.465) Nov 11 13:30:09 crc kubenswrapper[4842]: Trace[236305515]: [11.771584821s] [11.771584821s] END Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.465943 4842 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 11 13:30:09 crc kubenswrapper[4842]: E1111 13:30:09.469950 4842 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.537499 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.541566 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.974266 4842 apiserver.go:52] "Watching apiserver" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.982660 4842 
reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.983076 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-f5rhw","openshift-image-registry/node-ca-p8pll","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.983623 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.983642 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.983657 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.983697 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:09 crc kubenswrapper[4842]: E1111 13:30:09.983755 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.984467 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.984522 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.984550 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:09 crc kubenswrapper[4842]: E1111 13:30:09.984597 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:09 crc kubenswrapper[4842]: E1111 13:30:09.984676 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.985080 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.989830 4842 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.996412 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.996582 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.997153 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.997440 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 11 13:30:09 crc kubenswrapper[4842]: I1111 13:30:09.998530 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.005833 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.005834 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.007083 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.007224 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.028202 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.028378 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.028536 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.028546 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.033859 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.033891 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.034173 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.061918 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00
be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067553 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067607 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067635 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067659 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067679 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067730 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 11 13:30:10 crc 
kubenswrapper[4842]: I1111 13:30:10.067750 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067771 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067799 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067823 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067844 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067867 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067888 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067911 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.067931 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068036 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: 
\"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068248 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068012 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068063 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068078 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068217 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068252 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068273 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068377 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068441 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.068558 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:30:10.568529019 +0000 UTC m=+21.228818638 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068942 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068569 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068973 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068732 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069027 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.068809 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069075 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069119 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069142 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069159 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069180 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069203 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069229 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069252 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069275 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069293 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069310 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069331 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069349 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069367 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069389 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069413 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069434 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069459 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069480 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069500 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069522 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069548 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069570 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069590 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069611 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069633 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069652 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069669 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069172 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069712 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069220 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069401 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069733 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069407 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069439 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069488 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069506 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069763 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069528 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069561 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069628 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069678 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069828 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069932 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070016 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070169 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070258 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.069753 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070319 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070342 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070351 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070360 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070378 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070416 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070451 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070479 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070510 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070531 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070552 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070574 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070595 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070619 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070646 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: 
\"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070674 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070696 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070720 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070744 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070767 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070790 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070819 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070845 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070867 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070893 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") 
pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070920 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070978 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071002 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071025 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071046 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071112 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071449 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071472 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071502 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071522 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") 
pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071542 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071562 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071580 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071602 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071630 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071650 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071674 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071723 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071747 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071767 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071785 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071809 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071832 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071856 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071876 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071898 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071920 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071942 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071961 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.071980 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072000 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072020 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072052 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072074 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072114 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072136 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072156 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072176 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072204 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072224 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" 
(UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072245 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072269 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072292 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072312 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072332 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072353 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072373 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072394 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072417 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072437 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072461 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072480 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072501 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072521 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072543 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072563 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072585 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072606 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072626 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072646 4842 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072666 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072686 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072708 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072732 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072756 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072779 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072801 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072823 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072844 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: 
I1111 13:30:10.072864 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072883 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072904 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072923 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072942 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072964 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.072985 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073007 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073030 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073053 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073083 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073146 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073169 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073190 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073210 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073229 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073251 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073270 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073290 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073311 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: 
\"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073332 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073351 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073377 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073397 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073418 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073437 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073458 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073481 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073501 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073524 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073547 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073569 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073593 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073617 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073640 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073665 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073703 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073728 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073752 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073779 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073802 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073848 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073874 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073902 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073927 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073951 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073973 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074000 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074025 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074056 4842 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074079 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074123 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074187 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074221 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074252 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/123a19db-ff30-45bf-913c-61f72e10cadc-hosts-file\") pod \"node-resolver-f5rhw\" (UID: \"123a19db-ff30-45bf-913c-61f72e10cadc\") " pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074276 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074301 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074329 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074354 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b2ec2204-2327-4d28-a8d3-b24380c1671c-serviceca\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074381 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074406 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074523 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074625 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b2ec2204-2327-4d28-a8d3-b24380c1671c-host\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074655 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084272 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084332 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084372 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np4mp\" (UniqueName: \"kubernetes.io/projected/b2ec2204-2327-4d28-a8d3-b24380c1671c-kube-api-access-np4mp\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " 
pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084403 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9bq6\" (UniqueName: \"kubernetes.io/projected/123a19db-ff30-45bf-913c-61f72e10cadc-kube-api-access-l9bq6\") pod \"node-resolver-f5rhw\" (UID: \"123a19db-ff30-45bf-913c-61f72e10cadc\") " pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084433 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084473 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084504 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084641 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084660 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084678 4842 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084694 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084708 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084730 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084746 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084762 4842 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084778 4842 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084793 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084809 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084826 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084842 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084857 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084872 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084887 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084900 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084914 4842 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084928 4842 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084946 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: 
\"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084961 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.084976 4842 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085026 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085043 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085057 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085072 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085090 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085129 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085146 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.085160 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070451 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.091709 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070634 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.073989 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.070791 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074518 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.075087 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.075306 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.075657 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.075979 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.076758 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.077212 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.077527 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.079335 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.091964 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:10.59194227 +0000 UTC m=+21.252231889 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.085248 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.092012 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:10.592006112 +0000 UTC m=+21.252295731 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.090222 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.091694 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.091728 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.074112 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092077 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092160 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092250 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092531 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092787 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092797 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092997 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.092830 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.093147 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.093416 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.093488 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.093534 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.093535 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.093692 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.093839 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.094147 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.094280 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.094466 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.094613 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.094953 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.097438 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.097557 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.097645 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.097783 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.098613 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.099507 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.101776 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.102828 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.103267 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.108157 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.108644 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.108684 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.108786 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.109120 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.109167 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.109195 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.109239 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.109340 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.109410 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.109824 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.110368 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.110426 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.111000 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.111590 4842 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.111914 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.112186 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.112216 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.112232 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.112280 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.112300 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:10.612280705 +0000 UTC m=+21.272570504 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.112411 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.112722 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.113177 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.113551 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.113618 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.113746 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.113783 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.113986 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.114416 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.114692 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.114944 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115195 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115323 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115529 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115657 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115867 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115874 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115979 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.115984 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.116131 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.116307 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.116351 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.116384 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.116437 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.116560 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.117033 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.117183 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.117222 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.117625 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.118019 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.118042 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.118156 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.118242 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.118287 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.118517 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.119206 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.120905 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.122121 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.122711 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.122794 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.122796 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.124637 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.125257 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.125528 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.125946 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.126303 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.126922 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.126932 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.127047 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.127185 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.127332 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.128327 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.128444 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.128518 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.128621 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.128709 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.128928 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.129409 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.129777 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.129872 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.130071 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.130221 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.130287 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.130451 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.130466 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.130858 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.131194 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.132827 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.132926 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.133192 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.133481 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.133713 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.133813 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.134124 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.134214 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.134369 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.134691 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.134921 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.134782 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.135449 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.135488 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.135558 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.135674 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.136027 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.136369 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.138246 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.138329 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.138489 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.138494 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.138625 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.139032 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.139367 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.139469 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.141702 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.142074 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.142127 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.142147 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.142229 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-11 13:30:10.64220224 +0000 UTC m=+21.302491859 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.142666 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.142729 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.142884 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.143218 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.143272 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.143437 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.142733 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.138868 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.143546 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.144545 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.145925 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.146353 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.150185 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.150460 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.159128 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.168403 4842 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.178577 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.180217 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.183191 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186203 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np4mp\" (UniqueName: \"kubernetes.io/projected/b2ec2204-2327-4d28-a8d3-b24380c1671c-kube-api-access-np4mp\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186243 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9bq6\" (UniqueName: \"kubernetes.io/projected/123a19db-ff30-45bf-913c-61f72e10cadc-kube-api-access-l9bq6\") pod \"node-resolver-f5rhw\" (UID: \"123a19db-ff30-45bf-913c-61f72e10cadc\") " pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186282 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186314 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186330 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/123a19db-ff30-45bf-913c-61f72e10cadc-hosts-file\") pod \"node-resolver-f5rhw\" (UID: \"123a19db-ff30-45bf-913c-61f72e10cadc\") " pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186402 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b2ec2204-2327-4d28-a8d3-b24380c1671c-serviceca\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186418 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: 
\"kubernetes.io/host-path/b2ec2204-2327-4d28-a8d3-b24380c1671c-host\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186460 4842 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186471 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186481 4842 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186490 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186498 4842 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186507 4842 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186516 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186526 4842 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186535 4842 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186544 4842 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186553 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186561 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186570 4842 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186585 4842 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186594 4842 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186602 4842 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186610 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186619 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186628 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186640 4842 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186648 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186657 4842 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186666 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186675 4842 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186685 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186693 4842 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" 
DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186702 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186711 4842 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186720 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186729 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186739 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186765 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186774 4842 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186783 4842 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186792 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186801 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186810 4842 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186819 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186828 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" 
DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186837 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186846 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186867 4842 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186876 4842 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186886 4842 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186927 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186937 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186946 4842 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186954 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186962 4842 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186972 4842 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186980 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186990 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 11 
13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.186999 4842 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.187120 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.187456 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/123a19db-ff30-45bf-913c-61f72e10cadc-hosts-file\") pod \"node-resolver-f5rhw\" (UID: \"123a19db-ff30-45bf-913c-61f72e10cadc\") " pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.187570 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188176 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188208 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188218 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188228 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188265 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188275 4842 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188298 4842 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188320 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc 
kubenswrapper[4842]: I1111 13:30:10.188331 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188340 4842 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188350 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188367 4842 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188342 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188606 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b2ec2204-2327-4d28-a8d3-b24380c1671c-serviceca\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.188729 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b2ec2204-2327-4d28-a8d3-b24380c1671c-host\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189227 4842 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189248 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189285 4842 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189297 4842 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189306 4842 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node 
\"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189315 4842 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189326 4842 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189338 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189373 4842 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189387 4842 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189396 4842 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189405 4842 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189414 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189474 4842 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189489 4842 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189500 4842 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189583 4842 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189636 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 11 
13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189728 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189738 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189747 4842 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189780 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189790 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189802 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189811 4842 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189820 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189830 4842 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189863 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189873 4842 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189883 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189904 4842 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: 
I1111 13:30:10.189947 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189968 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.189982 4842 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192167 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192223 4842 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192274 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192291 4842 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192305 4842 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192327 4842 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192344 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192357 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192371 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192385 4842 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192398 4842 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192415 4842 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192428 4842 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192443 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192457 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192472 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192485 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192498 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192511 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192525 4842 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192539 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192556 4842 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc 
kubenswrapper[4842]: I1111 13:30:10.192569 4842 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192581 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192596 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192608 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192623 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192636 4842 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192650 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192664 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192677 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192691 4842 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192705 4842 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192718 4842 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192729 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192744 4842 reconciler_common.go:293] "Volume 
detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192758 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192772 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192785 4842 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192799 4842 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192812 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192825 4842 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192841 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192854 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192870 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192883 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192898 4842 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192910 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192920 4842 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192934 4842 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192958 4842 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192973 4842 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.192995 4842 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193007 4842 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193020 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193037 4842 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193054 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193067 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193081 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193093 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193123 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193136 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193149 4842 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.193163 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.197918 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.208173 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np4mp\" (UniqueName: \"kubernetes.io/projected/b2ec2204-2327-4d28-a8d3-b24380c1671c-kube-api-access-np4mp\") pod \"node-ca-p8pll\" (UID: \"b2ec2204-2327-4d28-a8d3-b24380c1671c\") " pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.211619 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"na
me\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.213994 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9bq6\" (UniqueName: \"kubernetes.io/projected/123a19db-ff30-45bf-913c-61f72e10cadc-kube-api-access-l9bq6\") pod \"node-resolver-f5rhw\" (UID: \"123a19db-ff30-45bf-913c-61f72e10cadc\") " pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.221197 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.230999 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.241375 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.249881 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.260939 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.269994 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.280011 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.289862 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.294160 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.294189 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.296053 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.301339 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.308148 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 11 13:30:10 crc kubenswrapper[4842]: W1111 13:30:10.308206 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-17464c42e73ad4aef2e3e30465455ac36a1cd09a897ea5fe2b789d7e173091b2 WatchSource:0}: Error finding container 17464c42e73ad4aef2e3e30465455ac36a1cd09a897ea5fe2b789d7e173091b2: Status 404 returned error can't find the container with id 17464c42e73ad4aef2e3e30465455ac36a1cd09a897ea5fe2b789d7e173091b2 Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.316724 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.320570 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 11 13:30:10 crc kubenswrapper[4842]: W1111 13:30:10.322722 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-e8098cba8162737a9c9dd71bee9d4d1c01de709d4ae2cbf29053528b1901d71c WatchSource:0}: Error finding container e8098cba8162737a9c9dd71bee9d4d1c01de709d4ae2cbf29053528b1901d71c: Status 404 returned error can't find the container with id e8098cba8162737a9c9dd71bee9d4d1c01de709d4ae2cbf29053528b1901d71c Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.326678 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-p8pll" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.332162 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\
\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.332628 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-f5rhw" Nov 11 13:30:10 crc kubenswrapper[4842]: W1111 13:30:10.338876 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-709f0dca9a77d10fb3a51d9537c1beb317477405383d8851b2bce96f4bef94d6 WatchSource:0}: Error finding container 709f0dca9a77d10fb3a51d9537c1beb317477405383d8851b2bce96f4bef94d6: Status 404 returned error can't find the container with id 709f0dca9a77d10fb3a51d9537c1beb317477405383d8851b2bce96f4bef94d6 Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.342176 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.352140 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: W1111 13:30:10.353900 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2ec2204_2327_4d28_a8d3_b24380c1671c.slice/crio-d062a2f1a1ae534147df4ee550140a87f7e151f0ef6ad7cdc4e8ba3789ac0835 WatchSource:0}: Error finding container d062a2f1a1ae534147df4ee550140a87f7e151f0ef6ad7cdc4e8ba3789ac0835: Status 404 returned error can't find the container with id d062a2f1a1ae534147df4ee550140a87f7e151f0ef6ad7cdc4e8ba3789ac0835 Nov 11 13:30:10 crc kubenswrapper[4842]: W1111 13:30:10.368564 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod123a19db_ff30_45bf_913c_61f72e10cadc.slice/crio-749807124c4671cb4566b304af1a96983751294c227e21c6afa44fc171e80ef6 WatchSource:0}: Error finding container 749807124c4671cb4566b304af1a96983751294c227e21c6afa44fc171e80ef6: Status 404 returned error can't find the container with id 749807124c4671cb4566b304af1a96983751294c227e21c6afa44fc171e80ef6 Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.597925 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.598147 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:30:11.598078313 +0000 UTC m=+22.258367932 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.598304 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.598342 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.598442 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.598490 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:11.598476845 +0000 UTC m=+22.258766464 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.598835 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.598868 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:11.598858787 +0000 UTC m=+22.259148406 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.634841 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-k84vc"] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.635220 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-mggn5"] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.635338 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dzhjw"] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.635642 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.635677 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.636827 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-mmt6t"] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.637545 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.637571 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.640131 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.640510 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.640929 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642191 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642297 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642463 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642505 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642641 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642693 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642722 4842 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642645 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642895 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.642954 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.643381 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.643429 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.643388 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.643826 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.643930 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.645574 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.654220 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.661415 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.673291 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.681034 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.690243 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.698808 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.698848 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.698989 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.699010 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.699009 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.699024 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.699035 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.699047 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.699073 4842 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:11.699060098 +0000 UTC m=+22.359349717 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: E1111 13:30:10.699116 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:11.699084088 +0000 UTC m=+22.359373707 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.700691 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.711582 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.720895 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.732710 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.744489 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.754145 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.765486 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.776228 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.792079 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799480 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9f3edace-782c-4646-8a57-d39d8373bb14-mcd-auth-proxy-config\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799517 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-kubelet\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799537 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-env-overrides\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799554 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cnibin\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799590 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-system-cni-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799607 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9f3edace-782c-4646-8a57-d39d8373bb14-proxy-tls\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799621 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-netns\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799635 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-cni-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799649 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-k8s-cni-cncf-io\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799675 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-system-cni-dir\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799690 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-daemon-config\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799704 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-bin\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799724 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wztg\" (UniqueName: \"kubernetes.io/projected/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-kube-api-access-7wztg\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799746 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-hostroot\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799766 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-slash\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799787 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-script-lib\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799801 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799816 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799832 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-cni-bin\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799847 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-cni-multus\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799863 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-conf-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799880 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799904 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-systemd\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799922 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-os-release\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " 
pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799944 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-multus-certs\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799960 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-etc-kubernetes\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799976 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-var-lib-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.799995 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-ovn\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800014 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-socket-dir-parent\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800029 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-netns\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800044 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-kubelet\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800067 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-ovn-kubernetes\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800090 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-netd\") pod \"ovnkube-node-dzhjw\" (UID: 
\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800133 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cni-binary-copy\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800155 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-os-release\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800179 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-etc-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800204 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-log-socket\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800226 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-cnibin\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-systemd-units\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800264 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t72xl\" (UniqueName: \"kubernetes.io/projected/9f3edace-782c-4646-8a57-d39d8373bb14-kube-api-access-t72xl\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800278 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-config\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800295 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzc59\" (UniqueName: 
\"kubernetes.io/projected/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-kube-api-access-nzc59\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800309 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a899ee4d-e1d3-44cc-a780-2dac60da21eb-cni-binary-copy\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800333 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dph9\" (UniqueName: \"kubernetes.io/projected/a899ee4d-e1d3-44cc-a780-2dac60da21eb-kube-api-access-6dph9\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800350 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9f3edace-782c-4646-8a57-d39d8373bb14-rootfs\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800367 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800384 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovn-node-metrics-cert\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.800401 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-node-log\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.802646 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.812083 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.825762 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.833910 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.841432 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.857205 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.866386 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.875802 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.884887 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.893268 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.901825 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-node-log\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.901893 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.901932 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovn-node-metrics-cert\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.901943 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-node-log\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.901965 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cnibin\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902017 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cnibin\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902021 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-system-cni-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902018 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902062 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9f3edace-782c-4646-8a57-d39d8373bb14-mcd-auth-proxy-config\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902088 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-kubelet\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902146 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-env-overrides\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902161 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-system-cni-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902202 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-kubelet\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902204 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-netns\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902168 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-netns\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902257 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-cni-dir\") pod 
\"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902290 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-k8s-cni-cncf-io\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902392 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-k8s-cni-cncf-io\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902341 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9f3edace-782c-4646-8a57-d39d8373bb14-proxy-tls\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902469 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-bin\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902469 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-cni-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902536 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-system-cni-dir\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902545 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-bin\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902503 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-system-cni-dir\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902629 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-daemon-config\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " 
pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902663 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wztg\" (UniqueName: \"kubernetes.io/projected/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-kube-api-access-7wztg\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902695 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-hostroot\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902733 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-slash\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902755 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-cni-bin\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902776 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-cni-multus\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902797 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-conf-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902818 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902839 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-script-lib\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902859 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 
13:30:10.902879 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902908 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-systemd\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902910 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9f3edace-782c-4646-8a57-d39d8373bb14-mcd-auth-proxy-config\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902927 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-os-release\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902963 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-multus-certs\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902980 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-os-release\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.902985 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-etc-kubernetes\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903002 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-netns\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903018 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-kubelet\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903037 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-var-lib-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903051 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-ovn\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903068 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-socket-dir-parent\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903087 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-ovn-kubernetes\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903123 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-netd\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903142 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cni-binary-copy\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903160 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-os-release\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903182 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-multus-certs\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903207 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-systemd-units\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903208 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-ovn-kubernetes\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903179 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-systemd-units\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903239 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-etc-kubernetes\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903250 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-os-release\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903158 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-kubelet\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903267 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-run-netns\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903273 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-etc-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903093 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-env-overrides\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903291 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-conf-dir\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903419 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc 
kubenswrapper[4842]: I1111 13:30:10.903420 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-hostroot\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903430 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-ovn\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903442 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-cni-multus\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903431 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-host-var-lib-cni-bin\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903458 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-netd\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903472 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-socket-dir-parent\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903475 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a899ee4d-e1d3-44cc-a780-2dac60da21eb-multus-daemon-config\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903494 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-slash\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903501 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-etc-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903502 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-log-socket\") pod 
\"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903502 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-var-lib-openvswitch\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903485 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-systemd\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903533 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-cnibin\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903551 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-log-socket\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903594 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t72xl\" (UniqueName: \"kubernetes.io/projected/9f3edace-782c-4646-8a57-d39d8373bb14-kube-api-access-t72xl\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903595 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a899ee4d-e1d3-44cc-a780-2dac60da21eb-cnibin\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903716 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-config\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903779 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzc59\" (UniqueName: \"kubernetes.io/projected/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-kube-api-access-nzc59\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.903860 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a899ee4d-e1d3-44cc-a780-2dac60da21eb-cni-binary-copy\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 
13:30:10.903936 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-script-lib\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904036 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904119 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dph9\" (UniqueName: \"kubernetes.io/projected/a899ee4d-e1d3-44cc-a780-2dac60da21eb-kube-api-access-6dph9\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904149 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9f3edace-782c-4646-8a57-d39d8373bb14-rootfs\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904182 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/9f3edace-782c-4646-8a57-d39d8373bb14-rootfs\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904297 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904327 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-config\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904419 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a899ee4d-e1d3-44cc-a780-2dac60da21eb-cni-binary-copy\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.904953 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-cni-binary-copy\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.906485 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9f3edace-782c-4646-8a57-d39d8373bb14-proxy-tls\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.908054 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovn-node-metrics-cert\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.918765 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dph9\" (UniqueName: \"kubernetes.io/projected/a899ee4d-e1d3-44cc-a780-2dac60da21eb-kube-api-access-6dph9\") pod \"multus-mggn5\" (UID: \"a899ee4d-e1d3-44cc-a780-2dac60da21eb\") " pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.924023 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzc59\" (UniqueName: \"kubernetes.io/projected/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-kube-api-access-nzc59\") pod \"ovnkube-node-dzhjw\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.925885 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t72xl\" (UniqueName: \"kubernetes.io/projected/9f3edace-782c-4646-8a57-d39d8373bb14-kube-api-access-t72xl\") pod \"machine-config-daemon-k84vc\" (UID: \"9f3edace-782c-4646-8a57-d39d8373bb14\") " pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.926389 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wztg\" (UniqueName: \"kubernetes.io/projected/5aa4a04f-c3e1-4a93-a07a-07ba502d737f-kube-api-access-7wztg\") pod \"multus-additional-cni-plugins-mmt6t\" (UID: \"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\") " pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.955233 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-mggn5" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.965434 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.972251 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:10 crc kubenswrapper[4842]: I1111 13:30:10.980209 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.058673 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.058813 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:11 crc kubenswrapper[4842]: W1111 13:30:11.074481 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda899ee4d_e1d3_44cc_a780_2dac60da21eb.slice/crio-df86fb957c3b91e041d1d91a152ac6bd8158dd33fe7f86bc5d502e0132bb51fb WatchSource:0}: Error finding container df86fb957c3b91e041d1d91a152ac6bd8158dd33fe7f86bc5d502e0132bb51fb: Status 404 returned error can't find the container with id df86fb957c3b91e041d1d91a152ac6bd8158dd33fe7f86bc5d502e0132bb51fb Nov 11 13:30:11 crc kubenswrapper[4842]: W1111 13:30:11.075797 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8bdbe88_f5ed_4117_92ea_6e1f45f6b495.slice/crio-b253f6b4c2870b007716b6cacd111c41ea996b1dac1c6c0dd61b757cf11fe8d3 WatchSource:0}: Error finding container b253f6b4c2870b007716b6cacd111c41ea996b1dac1c6c0dd61b757cf11fe8d3: Status 404 returned error can't find the container with id b253f6b4c2870b007716b6cacd111c41ea996b1dac1c6c0dd61b757cf11fe8d3 Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.159854 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-p8pll" event={"ID":"b2ec2204-2327-4d28-a8d3-b24380c1671c","Type":"ContainerStarted","Data":"5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.159919 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-p8pll" event={"ID":"b2ec2204-2327-4d28-a8d3-b24380c1671c","Type":"ContainerStarted","Data":"d062a2f1a1ae534147df4ee550140a87f7e151f0ef6ad7cdc4e8ba3789ac0835"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.160935 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"709f0dca9a77d10fb3a51d9537c1beb317477405383d8851b2bce96f4bef94d6"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.162477 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"b253f6b4c2870b007716b6cacd111c41ea996b1dac1c6c0dd61b757cf11fe8d3"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.163941 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.163985 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"17464c42e73ad4aef2e3e30465455ac36a1cd09a897ea5fe2b789d7e173091b2"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.166035 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerStarted","Data":"df86fb957c3b91e041d1d91a152ac6bd8158dd33fe7f86bc5d502e0132bb51fb"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.167380 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-f5rhw" event={"ID":"123a19db-ff30-45bf-913c-61f72e10cadc","Type":"ContainerStarted","Data":"d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.167403 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-f5rhw" event={"ID":"123a19db-ff30-45bf-913c-61f72e10cadc","Type":"ContainerStarted","Data":"749807124c4671cb4566b304af1a96983751294c227e21c6afa44fc171e80ef6"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.169434 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.169842 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerStarted","Data":"373895591aab7fde425a3aee7bcdfcb8590ef2427e49fd57a76cff9aaac9ad7a"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.171579 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"c90cc8463e1c0a052e35b7c7cbab8dcab4eeb6a34d6dc8f6fe1b649048156103"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.173363 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.173910 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.175669 4842 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec" exitCode=255 Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.175729 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.175784 4842 scope.go:117] "RemoveContainer" containerID="df03f17d23bf6355100b5b75db5b9fbbeb8c8646530f1a4984e43e3c5d510c8c" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.177602 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.182267 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.182301 
4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.182313 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e8098cba8162737a9c9dd71bee9d4d1c01de709d4ae2cbf29053528b1901d71c"} Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.190550 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.200926 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.201732 4842 scope.go:117] "RemoveContainer" containerID="83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.201907 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.201939 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.218322 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.230814 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.248250 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.262686 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.276679 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.288011 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.299302 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.308800 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.327184 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.339074 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df03f17d23bf6355100b5b75db5b9fbbeb8c8646530f1a4984e43e3c5d510c8c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:03Z\\\",\\\"message\\\":\\\"W1111 13:29:53.110630 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1111 13:29:53.111281 1 crypto.go:601] Generating new CA for check-endpoints-signer@1762867793 cert, and key in /tmp/serving-cert-3402689851/serving-signer.crt, /tmp/serving-cert-3402689851/serving-signer.key\\\\nI1111 13:29:53.477366 1 observer_polling.go:159] Starting file observer\\\\nW1111 13:29:53.480750 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1111 13:29:53.480937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:29:53.483319 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3402689851/tls.crt::/tmp/serving-cert-3402689851/tls.key\\\\\\\"\\\\nF1111 13:30:03.856186 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating 
requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.365238 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.404372 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.427135 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.445259 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.483295 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.526887 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.565867 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00
be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.606597 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.612029 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.612239 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:30:13.612200776 +0000 UTC m=+24.272490395 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.612317 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.612390 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.612498 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.612603 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 
nodeName:}" failed. No retries permitted until 2025-11-11 13:30:13.612584629 +0000 UTC m=+24.272874248 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.612520 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.612678 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:13.612665081 +0000 UTC m=+24.272954700 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.643484 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.685523 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc 
kubenswrapper[4842]: I1111 13:30:11.713998 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.714054 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714172 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714200 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714214 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714172 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714267 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:13.714252015 +0000 UTC m=+24.374541634 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714274 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714284 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:11 crc kubenswrapper[4842]: E1111 13:30:11.714323 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:13.714312257 +0000 UTC m=+24.374601876 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.724374 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.763892 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.802234 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:11 crc kubenswrapper[4842]: I1111 13:30:11.848489 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers 
with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\
"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name
\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-11T13:30:11Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.058919 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.058942 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:12 crc kubenswrapper[4842]: E1111 13:30:12.059267 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:12 crc kubenswrapper[4842]: E1111 13:30:12.059150 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.063671 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.064230 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.065483 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.066181 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.067230 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.067718 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.068283 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.069207 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.069858 4842 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.070791 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.071427 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.072540 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.073058 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.073610 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.074498 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.075037 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.076646 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.077564 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.078713 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.080452 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.081253 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.082045 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.082657 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.083414 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.083910 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.084529 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.100711 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.101289 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.102515 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.103113 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.103690 4842 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.104318 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.105851 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.108035 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.108469 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.110074 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.110838 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.111462 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.112119 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.121297 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.121801 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.122762 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.123398 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.124326 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.124805 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.129004 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.130159 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.131534 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.132130 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.133319 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.133947 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.134745 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.136344 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.136984 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.186057 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerStarted","Data":"2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c"} Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.188010 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.190636 4842 scope.go:117] "RemoveContainer" containerID="83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec" Nov 11 13:30:12 crc kubenswrapper[4842]: E1111 13:30:12.190782 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.192653 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerStarted","Data":"fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1"} Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.193876 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c" exitCode=0 Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.193929 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.196171 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d"} Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.196229 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" 
event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a"} Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.199201 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.209560 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.225340 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df03f17d23bf6355100b5b75db5b9fbbeb8c8646530f1a4984e43e3c5d510c8c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:03Z\\\",\\\"message\\\":\\\"W1111 13:29:53.110630 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1111 13:29:53.111281 1 crypto.go:601] Generating new CA for check-endpoints-signer@1762867793 cert, and key in /tmp/serving-cert-3402689851/serving-signer.crt, /tmp/serving-cert-3402689851/serving-signer.key\\\\nI1111 13:29:53.477366 1 observer_polling.go:159] Starting file observer\\\\nW1111 13:29:53.480750 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1111 13:29:53.480937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:29:53.483319 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3402689851/tls.crt::/tmp/serving-cert-3402689851/tls.key\\\\\\\"\\\\nF1111 13:30:03.856186 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating 
requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.237020 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.247997 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.260615 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.271304 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.282149 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.293671 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.304432 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.314625 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.323074 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.361995 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.408980 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers 
with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\
"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name
\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.444424 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.483860 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.524706 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.568085 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.607952 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.647642 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.686819 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.725043 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.765765 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.811363 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.845180 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.892604 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.926665 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:12 crc kubenswrapper[4842]: I1111 13:30:12.970163 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:12Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.058000 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.058366 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.200676 4842 generic.go:334] "Generic (PLEG): container finished" podID="5aa4a04f-c3e1-4a93-a07a-07ba502d737f" containerID="fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1" exitCode=0 Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.200775 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerDied","Data":"fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1"} Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.206973 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6"} Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.215606 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.216017 4842 scope.go:117] "RemoveContainer" containerID="83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec" Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.216162 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.216808 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.242907 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.259113 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.270399 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.283074 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.293380 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.304088 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.323084 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.333130 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.363991 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.405522 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.443503 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.484717 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.527055 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.561890 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.619896 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.645425 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.650432 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.650565 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.650582 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:30:17.65056041 +0000 UTC m=+28.310850029 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.650605 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.650684 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.650717 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.650725 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:17.650718585 +0000 UTC m=+28.311008204 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.650795 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:17.650778977 +0000 UTC m=+28.311068656 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.683993 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.727042 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.751403 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.751518 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751646 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751683 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751691 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751720 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751736 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751797 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-11 13:30:17.751775413 +0000 UTC m=+28.412065032 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751697 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:13 crc kubenswrapper[4842]: E1111 13:30:13.751880 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:17.751858665 +0000 UTC m=+28.412148284 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.764027 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.806601 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.847194 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.892634 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.924712 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:13 crc kubenswrapper[4842]: I1111 13:30:13.964876 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:13Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.007237 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.046917 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.058426 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.058426 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:14 crc kubenswrapper[4842]: E1111 13:30:14.058742 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:14 crc kubenswrapper[4842]: E1111 13:30:14.058657 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.086340 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.221826 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerStarted","Data":"f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0"} Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.225301 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.225341 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.233449 4842 
status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.248078 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.266968 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.280889 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.295004 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.325835 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.364011 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.404586 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.453865 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.485549 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.525024 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.565475 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.604871 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:14 crc kubenswrapper[4842]: I1111 13:30:14.646870 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.058169 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:15 crc kubenswrapper[4842]: E1111 13:30:15.058793 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.229269 4842 generic.go:334] "Generic (PLEG): container finished" podID="5aa4a04f-c3e1-4a93-a07a-07ba502d737f" containerID="f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0" exitCode=0 Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.229352 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerDied","Data":"f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0"} Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.234930 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.234966 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.234978 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.249268 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.260619 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.278423 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.289956 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.303424 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.313339 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.322728 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.345797 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.360190 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.374800 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.387259 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.397906 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.409672 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.425314 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.870554 4842 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.874685 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.874758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.874778 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.874897 4842 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.885629 4842 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.885980 4842 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.887865 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.887929 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.887960 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.887994 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.888020 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:15Z","lastTransitionTime":"2025-11-11T13:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:15 crc kubenswrapper[4842]: E1111 13:30:15.919837 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.924504 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.924562 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.924577 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.924654 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.925143 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:15Z","lastTransitionTime":"2025-11-11T13:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:15 crc kubenswrapper[4842]: E1111 13:30:15.945554 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.951152 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.951234 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.951248 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.951266 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.951622 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:15Z","lastTransitionTime":"2025-11-11T13:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:15 crc kubenswrapper[4842]: E1111 13:30:15.973820 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.978798 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.978859 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.978876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.978942 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:15 crc kubenswrapper[4842]: I1111 13:30:15.978964 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:15Z","lastTransitionTime":"2025-11-11T13:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:15 crc kubenswrapper[4842]: E1111 13:30:15.996701 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:15Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.001782 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.001817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.001830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.001851 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.001865 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: E1111 13:30:16.020362 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: E1111 13:30:16.020767 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.024603 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.024681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.024703 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.024736 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.024760 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.058445 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:16 crc kubenswrapper[4842]: E1111 13:30:16.058648 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.058450 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:16 crc kubenswrapper[4842]: E1111 13:30:16.059042 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.127450 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.127497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.127514 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.127535 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.127548 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.235692 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.235818 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.235858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.235888 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.235910 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.241588 4842 generic.go:334] "Generic (PLEG): container finished" podID="5aa4a04f-c3e1-4a93-a07a-07ba502d737f" containerID="12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498" exitCode=0 Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.241642 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerDied","Data":"12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.269767 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.285687 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-
dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.299877 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.313447 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.326221 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 
13:30:16.337756 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2
025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.338666 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.338704 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.338716 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.338731 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.338740 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.347000 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.356025 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.371691 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkub
e-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574
53265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.384321 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.395819 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.407022 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.416960 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.428947 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:16Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.442437 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.442481 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.442491 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.442509 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.442521 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.544520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.544748 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.544756 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.544771 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.544779 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.646859 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.646888 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.646898 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.646915 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.646926 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.749642 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.749683 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.749693 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.749710 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.749721 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.852165 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.852217 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.852231 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.852250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.852262 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.954855 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.954900 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.954909 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.954927 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:16 crc kubenswrapper[4842]: I1111 13:30:16.954937 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:16Z","lastTransitionTime":"2025-11-11T13:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.057077 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.057144 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.057154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.057169 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.057177 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.058539 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.058692 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.160081 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.160163 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.160180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.160203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.160220 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.248255 4842 generic.go:334] "Generic (PLEG): container finished" podID="5aa4a04f-c3e1-4a93-a07a-07ba502d737f" containerID="d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec" exitCode=0 Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.248353 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerDied","Data":"d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.255143 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.263199 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.263242 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.263258 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.263277 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.263290 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.270741 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa4
04d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.283647 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.293232 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.308299 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.325055 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.335332 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.351354 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.366883 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.366952 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.366967 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.366986 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.367001 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.367209 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.383137 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.396645 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.413614 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.426525 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.439576 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.455146 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.469495 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.469530 4842 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.469541 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.469559 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.469569 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.572533 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.572571 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.572581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.572599 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.572611 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.675553 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.675590 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.675601 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.675618 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.675629 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.722291 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.722490 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:30:25.722463887 +0000 UTC m=+36.382753506 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.722600 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.722633 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.722708 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.722740 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:25.722734346 +0000 UTC m=+36.383023965 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.723059 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.723155 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:25.723136838 +0000 UTC m=+36.383426497 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.777596 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.777643 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.777655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.777672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.777685 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.823578 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.823624 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.823760 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.823776 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.823771 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.823812 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.823827 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.823880 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:25.823864175 +0000 UTC m=+36.484153794 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.823786 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:17 crc kubenswrapper[4842]: E1111 13:30:17.824210 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:25.824201925 +0000 UTC m=+36.484491544 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.880249 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.880296 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.880307 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.880326 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.880337 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.982637 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.982673 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.982683 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.982696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:17 crc kubenswrapper[4842]: I1111 13:30:17.982705 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:17Z","lastTransitionTime":"2025-11-11T13:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.058476 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.058551 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:18 crc kubenswrapper[4842]: E1111 13:30:18.058615 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:18 crc kubenswrapper[4842]: E1111 13:30:18.058770 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.085415 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.085466 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.085477 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.085498 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.085511 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.188412 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.188459 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.188472 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.188493 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.188506 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.264481 4842 generic.go:334] "Generic (PLEG): container finished" podID="5aa4a04f-c3e1-4a93-a07a-07ba502d737f" containerID="5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3" exitCode=0 Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.264527 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerDied","Data":"5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.291518 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.291569 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.291587 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.291606 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.291623 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.292905 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z 
is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.306093 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.315923 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.329451 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.347755 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.368020 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.387678 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.394913 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.394963 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.394976 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.394997 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.395012 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.405230 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.419753 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.434452 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.447487 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.465743 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.479690 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.493906 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:18Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.497772 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.497798 4842 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.497807 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.497821 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.497833 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.600738 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.600785 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.600794 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.600811 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.600822 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.702791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.702831 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.702842 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.702858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.702868 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.805672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.805752 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.805776 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.805808 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.805830 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.909174 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.909225 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.909240 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.909262 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:18 crc kubenswrapper[4842]: I1111 13:30:18.909275 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:18Z","lastTransitionTime":"2025-11-11T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.011651 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.011709 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.011723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.011748 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.011762 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.058452 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:19 crc kubenswrapper[4842]: E1111 13:30:19.058708 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.115592 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.115640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.115649 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.115666 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.115677 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.217512 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.217897 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.217919 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.217943 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.218003 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.270882 4842 generic.go:334] "Generic (PLEG): container finished" podID="5aa4a04f-c3e1-4a93-a07a-07ba502d737f" containerID="6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb" exitCode=0 Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.270928 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerDied","Data":"6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.294332 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.309523 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.320729 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.320783 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.320800 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.320821 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.320837 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.331949 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.350934 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.368034 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.385294 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.398597 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.412799 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.423538 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.423567 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.423576 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.423590 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.423599 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.435619 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.449015 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.466508 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.479180 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.491183 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.507340 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.526586 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.526681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.526695 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.526723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.526740 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.629278 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.629329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.629338 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.629355 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.629368 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.731213 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.731254 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.731263 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.731278 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.731288 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.833996 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.834029 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.834039 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.834053 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.834063 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.938504 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.938571 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.938590 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.938619 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:19 crc kubenswrapper[4842]: I1111 13:30:19.938638 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:19Z","lastTransitionTime":"2025-11-11T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.041986 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.042043 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.042054 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.042074 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.042087 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.058366 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.058444 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:20 crc kubenswrapper[4842]: E1111 13:30:20.058590 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:20 crc kubenswrapper[4842]: E1111 13:30:20.058686 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.072660 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.085667 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.098814 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.111348 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.125087 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.140626 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506
0de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.144215 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.144249 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.144261 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.144280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.144291 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.155210 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.169630 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.186591 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.196569 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.206672 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.216158 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.230066 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.246414 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.246463 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.246475 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.246496 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.246508 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.246480 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.365058 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.365116 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.365132 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.365155 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.365167 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.369274 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.369669 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.369770 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.373957 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" event={"ID":"5aa4a04f-c3e1-4a93-a07a-07ba502d737f","Type":"ContainerStarted","Data":"5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.384935 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.397120 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.397502 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.411761 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 
11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.427451 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.444573 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},
\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.467790 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.467836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.467878 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.467901 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.467915 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.470812 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"ku
be-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\
\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is 
after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.484521 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.498452 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.511744 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.523421 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.534642 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.552852 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.564970 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.570371 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.570411 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.570427 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.570447 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.570461 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.578915 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.590349 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.600628 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.620583 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.633630 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.647216 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.660613 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.672112 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.673241 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 
crc kubenswrapper[4842]: I1111 13:30:20.673291 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.673302 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.673317 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.673327 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.682899 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"na
me\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.696302 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f
0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mount
Path\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13
:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.708819 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.722973 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.735621 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.748020 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 
13:30:20.759141 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2
025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.775899 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.775942 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.775952 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.775968 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.775978 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.878586 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.878635 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.878647 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.878710 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.878727 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.973110 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.981145 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.981202 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.981221 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.981247 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:20 crc kubenswrapper[4842]: I1111 13:30:20.981264 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:20Z","lastTransitionTime":"2025-11-11T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.000269 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.015917 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.028014 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.048944 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.058984 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:21 crc kubenswrapper[4842]: E1111 13:30:21.059136 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.064703 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.078247 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.086366 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.086396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.086404 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.086418 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.086427 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.089807 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.102519 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.113030 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.125904 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.135733 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/r
un/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.146547 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.156729 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.167396 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.187530 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:21Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.192496 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.192546 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.192555 4842 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.192571 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.192595 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.294856 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.294921 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.294932 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.294953 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.294966 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.397004 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.397048 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.397064 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.397080 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.397088 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.499782 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.499824 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.499835 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.499852 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.499865 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.601800 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.601840 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.601857 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.601875 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.601886 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.704375 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.704410 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.704422 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.704439 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.704449 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.807698 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.807742 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.807755 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.807773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.807785 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.910824 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.910851 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.910859 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.910873 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:21 crc kubenswrapper[4842]: I1111 13:30:21.910881 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:21Z","lastTransitionTime":"2025-11-11T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.013454 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.013509 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.013520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.013536 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.013548 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.059049 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.059131 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:22 crc kubenswrapper[4842]: E1111 13:30:22.059220 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:22 crc kubenswrapper[4842]: E1111 13:30:22.059368 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.116338 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.116378 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.116390 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.116407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.116418 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.218650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.218686 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.218696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.218711 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.218720 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.320677 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.320726 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.320734 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.320749 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.320758 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.422666 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.422712 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.422725 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.422740 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.422751 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.525170 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.525225 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.525234 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.525248 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.525257 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.627964 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.628007 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.628019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.628038 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.628049 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.730450 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.730497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.730508 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.730524 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.730534 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.833177 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.833223 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.833234 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.833251 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.833263 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.935908 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.935973 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.935987 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.936007 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:22 crc kubenswrapper[4842]: I1111 13:30:22.936019 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:22Z","lastTransitionTime":"2025-11-11T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.040410 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.040467 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.040528 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.041131 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.041162 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.058803 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:23 crc kubenswrapper[4842]: E1111 13:30:23.058970 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.143385 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.143430 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.143440 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.143455 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.143466 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.195752 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt"] Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.196205 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.198191 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.198923 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.213557 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.226673 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.236864 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.245306 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.245335 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.245343 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.245357 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.245367 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.249407 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.261745 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.271214 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.289376 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.293958 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c48d9130-053f-49a1-b719-2cf8535aee1c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.294025 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c48d9130-053f-49a1-b719-2cf8535aee1c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.294055 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c48d9130-053f-49a1-b719-2cf8535aee1c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.294119 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztcsf\" (UniqueName: \"kubernetes.io/projected/c48d9130-053f-49a1-b719-2cf8535aee1c-kube-api-access-ztcsf\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.299323 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.309343 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.319898 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.331252 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.342323 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.346963 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.346988 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.346997 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.347011 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.347020 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.354774 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.367047 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.377041 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.385648 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/0.log" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.388090 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505" exitCode=1 Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.388147 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.389647 4842 scope.go:117] "RemoveContainer" containerID="f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.394934 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztcsf\" (UniqueName: \"kubernetes.io/projected/c48d9130-053f-49a1-b719-2cf8535aee1c-kube-api-access-ztcsf\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.394978 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c48d9130-053f-49a1-b719-2cf8535aee1c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.395043 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c48d9130-053f-49a1-b719-2cf8535aee1c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.395072 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c48d9130-053f-49a1-b719-2cf8535aee1c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.395626 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c48d9130-053f-49a1-b719-2cf8535aee1c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.395781 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c48d9130-053f-49a1-b719-2cf8535aee1c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.401241 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c48d9130-053f-49a1-b719-2cf8535aee1c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.404954 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40
623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.416736 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.417715 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztcsf\" (UniqueName: \"kubernetes.io/projected/c48d9130-053f-49a1-b719-2cf8535aee1c-kube-api-access-ztcsf\") pod \"ovnkube-control-plane-749d76644c-r96nt\" (UID: \"c48d9130-053f-49a1-b719-2cf8535aee1c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.434025 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.448364 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.451012 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.451054 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.451064 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.451080 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.451091 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.462020 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.477002 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.490670 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.500847 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.511612 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.513650 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" Nov 11 13:30:23 crc kubenswrapper[4842]: W1111 13:30:23.528584 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc48d9130_053f_49a1_b719_2cf8535aee1c.slice/crio-81c604b1558b134ae6a184eccf9b6ca98cc7548f624b67db9a31b29bde228441 WatchSource:0}: Error finding container 81c604b1558b134ae6a184eccf9b6ca98cc7548f624b67db9a31b29bde228441: Status 404 returned error can't find the container with id 81c604b1558b134ae6a184eccf9b6ca98cc7548f624b67db9a31b29bde228441 Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.530420 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086
d42c7df0ccc5af334519f505\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"111 13:30:23.209855 6159 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.209924 6159 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.210467 6159 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1111 13:30:23.210518 6159 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1111 13:30:23.210530 6159 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1111 13:30:23.210542 6159 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1111 13:30:23.210556 6159 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1111 13:30:23.210565 6159 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1111 13:30:23.210613 6159 factory.go:656] Stopping watch factory\\\\nI1111 13:30:23.210631 6159 ovnkube.go:599] Stopped ovnkube\\\\nI1111 13:30:23.210659 6159 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1111 13:30:23.210676 6159 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1111 13:30:23.210685 6159 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1111 13:30:23.210692 6159 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1111 13:30:23.210697 6159 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.545602 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.553394 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.553427 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.553438 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.553456 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.553468 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.558740 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.574412 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.586632 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.597637 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.656247 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.656288 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.656297 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.656314 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.656323 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.759562 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.759596 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.759606 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.759620 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.759630 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.862701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.862758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.862774 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.862794 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.862810 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.926179 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-hbtjv"] Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.926732 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:23 crc kubenswrapper[4842]: E1111 13:30:23.926805 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.956304 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7
c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.965227 4842 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.965515 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.965617 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.965681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.965759 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:23Z","lastTransitionTime":"2025-11-11T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:23 crc kubenswrapper[4842]: I1111 13:30:23.973465 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.002233 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.002486 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttbwz\" (UniqueName: \"kubernetes.io/projected/6b899889-1664-4e26-9cc9-0667626ac715-kube-api-access-ttbwz\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.002379 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:23Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.022854 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.045451 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.059059 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.059130 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:24 crc kubenswrapper[4842]: E1111 13:30:24.059228 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:24 crc kubenswrapper[4842]: E1111 13:30:24.059324 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.068446 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.068617 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.068634 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.068641 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.068657 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.068666 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.092168 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.103937 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttbwz\" (UniqueName: \"kubernetes.io/projected/6b899889-1664-4e26-9cc9-0667626ac715-kube-api-access-ttbwz\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.104014 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:24 crc kubenswrapper[4842]: E1111 13:30:24.104196 4842 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:24 crc kubenswrapper[4842]: E1111 13:30:24.104257 4842 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:24.604240811 +0000 UTC m=+35.264530440 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.114720 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086
d42c7df0ccc5af334519f505\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"111 13:30:23.209855 6159 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.209924 6159 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.210467 6159 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1111 13:30:23.210518 6159 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1111 13:30:23.210530 6159 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1111 13:30:23.210542 6159 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1111 13:30:23.210556 6159 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1111 13:30:23.210565 6159 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1111 13:30:23.210613 6159 factory.go:656] Stopping watch factory\\\\nI1111 13:30:23.210631 6159 ovnkube.go:599] Stopped ovnkube\\\\nI1111 13:30:23.210659 6159 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1111 13:30:23.210676 6159 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1111 13:30:23.210685 6159 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1111 13:30:23.210692 6159 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1111 13:30:23.210697 6159 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.122349 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttbwz\" (UniqueName: \"kubernetes.io/projected/6b899889-1664-4e26-9cc9-0667626ac715-kube-api-access-ttbwz\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.128431 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.139649 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.159364 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.170924 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.170958 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.170967 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.170983 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.170995 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.179191 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.190673 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.203992 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.215375 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.228399 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.273918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.273965 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.273974 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.273990 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.274002 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.377365 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.377409 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.377420 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.377439 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.377451 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.395024 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/0.log" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.398261 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.398832 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.400452 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" event={"ID":"c48d9130-053f-49a1-b719-2cf8535aee1c","Type":"ContainerStarted","Data":"27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.400490 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" event={"ID":"c48d9130-053f-49a1-b719-2cf8535aee1c","Type":"ContainerStarted","Data":"fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.400504 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" event={"ID":"c48d9130-053f-49a1-b719-2cf8535aee1c","Type":"ContainerStarted","Data":"81c604b1558b134ae6a184eccf9b6ca98cc7548f624b67db9a31b29bde228441"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.428719 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.447440 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.459067 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.476511 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.480726 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.480795 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.480807 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.480828 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.480840 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.493156 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.506786 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.524357 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.540160 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.555090 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.574954 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.584333 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.584376 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.584391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.584409 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.584421 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.590813 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.606431 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.612376 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:24 crc kubenswrapper[4842]: E1111 13:30:24.612554 4842 secret.go:188] 
Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:24 crc kubenswrapper[4842]: E1111 13:30:24.612605 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:25.612588002 +0000 UTC m=+36.272877611 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.620202 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"}
,{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.632073 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.645256 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.669676 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"111 13:30:23.209855 6159 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.209924 6159 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.210467 6159 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1111 13:30:23.210518 6159 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1111 13:30:23.210530 6159 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1111 13:30:23.210542 6159 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1111 13:30:23.210556 6159 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1111 13:30:23.210565 6159 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1111 13:30:23.210613 6159 factory.go:656] Stopping watch factory\\\\nI1111 13:30:23.210631 6159 ovnkube.go:599] Stopped ovnkube\\\\nI1111 13:30:23.210659 6159 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1111 13:30:23.210676 6159 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1111 13:30:23.210685 6159 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1111 13:30:23.210692 6159 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1111 13:30:23.210697 6159 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.686792 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.687945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.688050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.688162 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.688234 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.688291 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.707018 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\
\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.723489 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.741976 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.761463 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.778389 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.792179 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.792438 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.792555 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.792621 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.792701 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.794395 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.806639 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.820214 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.839557 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"111 13:30:23.209855 6159 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.209924 6159 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.210467 6159 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1111 13:30:23.210518 6159 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1111 13:30:23.210530 6159 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1111 13:30:23.210542 6159 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1111 13:30:23.210556 6159 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1111 13:30:23.210565 6159 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1111 13:30:23.210613 6159 factory.go:656] Stopping watch factory\\\\nI1111 13:30:23.210631 6159 ovnkube.go:599] Stopped ovnkube\\\\nI1111 13:30:23.210659 6159 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1111 13:30:23.210676 6159 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1111 13:30:23.210685 6159 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1111 13:30:23.210692 6159 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1111 13:30:23.210697 6159 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.856601 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.870505 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.887151 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.895560 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 
crc kubenswrapper[4842]: I1111 13:30:24.895614 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.895625 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.895643 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.895656 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.906069 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"na
me\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.920198 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.940679 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:24Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.998618 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.998678 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.998691 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.998710 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:24 crc kubenswrapper[4842]: I1111 13:30:24.998725 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:24Z","lastTransitionTime":"2025-11-11T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.058331 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.058520 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.101743 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.101799 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.101813 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.101833 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.101852 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.204739 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.204779 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.204791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.204815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.204831 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.307935 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.307974 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.307982 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.307999 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.308009 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.407166 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/1.log" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.408739 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/0.log" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.409669 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.409707 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.409719 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.409738 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.409750 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.412131 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9" exitCode=1 Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.412173 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.412220 4842 scope.go:117] "RemoveContainer" containerID="f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.412845 4842 scope.go:117] "RemoveContainer" containerID="2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9" Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.413029 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.431175 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.446247 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.460689 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.473041 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.487552 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.502643 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.512859 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.512912 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.512926 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.512945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.512956 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.528543 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.543537 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.556896 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.571487 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/
crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.584850 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.602533 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.616461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.616508 4842 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.616519 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.616537 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.616546 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.618981 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.626691 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.626832 4842 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.626874 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:27.626861241 +0000 UTC m=+38.287150860 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.632525 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.646292 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.666336 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f38f4febe91909cd6f5263ded813f0a40777a086d42c7df0ccc5af334519f505\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"111 13:30:23.209855 6159 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.209924 6159 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1111 13:30:23.210467 6159 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1111 13:30:23.210518 6159 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1111 13:30:23.210530 6159 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1111 13:30:23.210542 6159 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1111 13:30:23.210556 6159 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1111 13:30:23.210565 6159 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1111 13:30:23.210613 6159 factory.go:656] Stopping watch factory\\\\nI1111 13:30:23.210631 6159 ovnkube.go:599] Stopped ovnkube\\\\nI1111 13:30:23.210659 6159 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1111 13:30:23.210676 6159 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1111 13:30:23.210685 6159 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1111 13:30:23.210692 6159 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1111 13:30:23.210697 6159 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed 
attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\
\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:25Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.719205 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.719239 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.719250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.719265 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.719274 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.727972 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.728179 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.728201 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:30:41.728176527 +0000 UTC m=+52.388466146 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.728289 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.728401 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.728436 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.728533 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:41.728498117 +0000 UTC m=+52.388787776 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.728563 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:41.728550648 +0000 UTC m=+52.388840307 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.822768 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.822816 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.822825 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.822842 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.822852 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.829392 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.829445 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.829617 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.829639 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.829653 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.829709 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:41.829692579 +0000 UTC m=+52.489982198 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.830154 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.830266 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.830342 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:25 crc kubenswrapper[4842]: E1111 13:30:25.830499 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:41.830472602 +0000 UTC m=+52.490762381 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.926452 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.926488 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.926498 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.926515 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:25 crc kubenswrapper[4842]: I1111 13:30:25.926526 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:25Z","lastTransitionTime":"2025-11-11T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.029675 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.029731 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.029746 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.029773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.029809 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.058726 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.058771 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.058734 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.058991 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.059240 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.059366 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.132653 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.132700 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.132709 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.132730 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.132743 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.141394 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.141462 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.141477 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.141507 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.141526 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.158483 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.162698 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.162789 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.162803 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.162825 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.162838 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.175561 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.178851 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.178894 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.178932 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.178955 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.178970 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.191278 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.194833 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.194879 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.194891 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.194911 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.194925 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.218439 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.223298 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.223441 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.223463 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.223492 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.223512 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.242851 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.243352 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.245306 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.245372 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.245391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.245421 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.245441 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.348703 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.348741 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.348754 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.348773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.348787 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.419600 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/1.log" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.426892 4842 scope.go:117] "RemoveContainer" containerID="2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9" Nov 11 13:30:26 crc kubenswrapper[4842]: E1111 13:30:26.431041 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.443204 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.451316 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.451402 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.451433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.451468 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.451494 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.460064 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" 
for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.480570 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls
/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.494014 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.513531 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.529602 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.546805 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.554056 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.554095 4842 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.554119 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.554136 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.554148 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.567774 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.582375 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.601546 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.615819 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/op
enshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.630417 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":
\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.643855 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.657704 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.657742 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.657752 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.657767 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.657778 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.658245 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.671550 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.687563 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:26Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.761180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.761229 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.761240 4842 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.761257 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.761267 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.864186 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.864252 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.864287 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.864318 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.864339 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.968280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.968340 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.968360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.968386 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:26 crc kubenswrapper[4842]: I1111 13:30:26.968408 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:26Z","lastTransitionTime":"2025-11-11T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.058885 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:27 crc kubenswrapper[4842]: E1111 13:30:27.059184 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.071721 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.071816 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.071838 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.071867 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.071889 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.174753 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.174791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.174800 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.174815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.174824 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.279064 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.279171 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.279197 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.279236 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.279260 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.382968 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.383094 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.383156 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.383195 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.383213 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.488276 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.488315 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.488327 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.488348 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.488360 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.591135 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.591203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.591216 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.591238 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.591257 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.652267 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:27 crc kubenswrapper[4842]: E1111 13:30:27.652743 4842 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:27 crc kubenswrapper[4842]: E1111 13:30:27.652928 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:31.652900229 +0000 UTC m=+42.313190028 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.693970 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.694270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.694346 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.694439 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.694500 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.797655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.797917 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.798020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.798084 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.798176 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.901431 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.901723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.901788 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.901876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:27 crc kubenswrapper[4842]: I1111 13:30:27.901941 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:27Z","lastTransitionTime":"2025-11-11T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.005328 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.005558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.005652 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.005744 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.005851 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.058689 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.058707 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.058708 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:28 crc kubenswrapper[4842]: E1111 13:30:28.058899 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:28 crc kubenswrapper[4842]: E1111 13:30:28.058986 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:28 crc kubenswrapper[4842]: E1111 13:30:28.059067 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.059642 4842 scope.go:117] "RemoveContainer" containerID="83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.108346 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.108379 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.108391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.108407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.108419 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.210304 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.210351 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.210360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.210374 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.210384 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.312909 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.312987 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.313005 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.313029 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.313046 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.415764 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.415805 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.415813 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.415828 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.415837 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.518558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.518592 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.518601 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.518615 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.518624 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.621526 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.621565 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.621576 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.621591 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.621602 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.724585 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.724896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.725004 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.725140 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.725239 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.828303 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.828341 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.828352 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.828373 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.828386 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.930843 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.931140 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.931287 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.931411 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:28 crc kubenswrapper[4842]: I1111 13:30:28.931520 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:28Z","lastTransitionTime":"2025-11-11T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.036346 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.036385 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.036397 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.036415 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.036428 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.058207 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:29 crc kubenswrapper[4842]: E1111 13:30:29.058359 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.139360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.139391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.139399 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.139413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.139423 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.240919 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.240949 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.240958 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.240973 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.240982 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.343394 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.343433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.343446 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.343462 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.343474 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.434480 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.436673 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.437114 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.445904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.445951 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.446002 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.446028 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.446043 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.458923 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.470984 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.483386 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.496485 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.510969 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.523358 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.535773 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.545187 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.548211 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.548227 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.548236 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc 
kubenswrapper[4842]: I1111 13:30:29.548249 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.548276 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.600314 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.622703 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.634016 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.644498 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.650544 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.650573 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.650581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.650595 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.650605 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.655993 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.668173 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.680034 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.692117 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:29Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.752733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.752773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.752785 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.752804 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.752814 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.855050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.855084 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.855095 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.855131 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.855142 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.956920 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.957255 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.957339 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.957445 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:29 crc kubenswrapper[4842]: I1111 13:30:29.957534 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:29Z","lastTransitionTime":"2025-11-11T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.058415 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.058434 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:30 crc kubenswrapper[4842]: E1111 13:30:30.058564 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.058518 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:30 crc kubenswrapper[4842]: E1111 13:30:30.058613 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:30 crc kubenswrapper[4842]: E1111 13:30:30.058761 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.060437 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.060470 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.060483 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.060501 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.060513 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.069423 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.
11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.078657 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.096332 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.107799 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.120234 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.133477 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.147222 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.162246 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.162283 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.162295 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.162315 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.162332 4842 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.166023 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.179687 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.193913 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.206337 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.218422 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":
{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.231265 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.244894 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.258640 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.263904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.263942 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.263953 4842 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.263970 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.263982 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.274356 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.366340 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.366381 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.366391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.366407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.366416 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.469248 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.469320 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.469335 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.469354 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.469367 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.571853 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.571894 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.571903 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.571918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.571927 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.674279 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.674311 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.674320 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.674333 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.674342 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.777818 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.777858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.777866 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.777882 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.777892 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.880419 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.880448 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.880457 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.880478 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.880496 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.983410 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.983439 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.983447 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.983461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:30 crc kubenswrapper[4842]: I1111 13:30:30.983470 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:30Z","lastTransitionTime":"2025-11-11T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.058771 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:31 crc kubenswrapper[4842]: E1111 13:30:31.058967 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.086018 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.086069 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.086077 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.086094 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.086130 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.188359 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.188401 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.188412 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.188428 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.188438 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.291092 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.291158 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.291169 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.291185 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.291196 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.393211 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.393255 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.393266 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.393292 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.393307 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.495676 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.495720 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.495734 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.495755 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.495769 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.598200 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.598243 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.598254 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.598273 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.598286 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.701030 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.701072 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.701082 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.701296 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.701306 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.708680 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:31 crc kubenswrapper[4842]: E1111 13:30:31.708819 4842 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:31 crc kubenswrapper[4842]: E1111 13:30:31.708886 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:39.70886838 +0000 UTC m=+50.369157999 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.803955 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.804040 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.804059 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.804089 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.804159 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.907903 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.907980 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.908001 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.908020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:31 crc kubenswrapper[4842]: I1111 13:30:31.908033 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:31Z","lastTransitionTime":"2025-11-11T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.011081 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.011138 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.011149 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.011163 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.011176 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.058997 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.059228 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.059229 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:32 crc kubenswrapper[4842]: E1111 13:30:32.059358 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:32 crc kubenswrapper[4842]: E1111 13:30:32.059698 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:32 crc kubenswrapper[4842]: E1111 13:30:32.059542 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.113455 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.113487 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.113497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.113512 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.113523 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.216275 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.216358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.216379 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.216407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.216426 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.319581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.319646 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.319661 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.319684 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.319703 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.422462 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.422533 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.422551 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.422573 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.422588 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.525197 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.525242 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.525250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.525265 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.525276 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.628751 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.628802 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.628813 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.628830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.628844 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.731485 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.731538 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.731550 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.731568 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.731580 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.835445 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.835514 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.835560 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.835589 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.835612 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.938528 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.938602 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.938621 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.938650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:32 crc kubenswrapper[4842]: I1111 13:30:32.938674 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:32Z","lastTransitionTime":"2025-11-11T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.041915 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.041990 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.042004 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.042024 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.042038 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.058647 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:33 crc kubenswrapper[4842]: E1111 13:30:33.058881 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.145448 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.145497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.145509 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.145528 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.145542 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.248696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.248744 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.248757 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.248777 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.248790 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.351473 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.351518 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.351530 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.351550 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.351562 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.453725 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.453777 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.453788 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.453806 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.453820 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.556229 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.556280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.556291 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.556311 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.556324 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.658413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.658459 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.658470 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.658488 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.658503 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.760891 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.760945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.760963 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.760980 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.760990 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.863407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.863437 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.863445 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.863460 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.863469 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.967094 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.967210 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.967277 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.967306 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:33 crc kubenswrapper[4842]: I1111 13:30:33.967321 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:33Z","lastTransitionTime":"2025-11-11T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.058812 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.058847 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:34 crc kubenswrapper[4842]: E1111 13:30:34.059066 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.058870 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:34 crc kubenswrapper[4842]: E1111 13:30:34.059192 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:34 crc kubenswrapper[4842]: E1111 13:30:34.059373 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.070019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.070057 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.070067 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.070084 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.070139 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.173395 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.173453 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.173469 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.173497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.173512 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.277033 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.277168 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.277190 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.277220 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.277239 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.380430 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.380468 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.380477 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.380492 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.380501 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.483545 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.483612 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.483634 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.483658 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.483678 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.587500 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.587553 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.587566 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.587588 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.587605 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.691595 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.691664 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.691679 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.691704 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.691722 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.794846 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.794899 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.794912 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.794934 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.794950 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.897831 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.897879 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.897891 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.897910 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:34 crc kubenswrapper[4842]: I1111 13:30:34.897922 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:34Z","lastTransitionTime":"2025-11-11T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.000789 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.000863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.000882 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.000912 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.000932 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.058225 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:35 crc kubenswrapper[4842]: E1111 13:30:35.058512 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.104362 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.104435 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.104459 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.104492 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.104519 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.208521 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.208571 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.208583 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.208601 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.208611 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.311017 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.311187 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.311203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.311226 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.311239 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.414780 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.414876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.414899 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.414931 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.414956 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.518538 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.518589 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.518601 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.518619 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.519086 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.622157 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.622222 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.622234 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.622293 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.622313 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.725530 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.725578 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.725587 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.725609 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.725620 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.828174 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.828285 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.828313 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.828359 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.828388 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.931435 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.931488 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.931502 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.931526 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:35 crc kubenswrapper[4842]: I1111 13:30:35.931542 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:35Z","lastTransitionTime":"2025-11-11T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.035611 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.035683 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.035698 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.035748 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.035766 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.058963 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.058967 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.059134 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.059242 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.059336 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.059512 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.138840 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.138910 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.138929 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.138957 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.138982 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.241396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.241440 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.241451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.241469 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.241482 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.344008 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.344040 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.344048 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.344061 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.344069 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.446733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.446793 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.446811 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.446835 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.446855 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.509707 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.509747 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.509760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.509777 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.509786 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.521035 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:36Z is after 
2025-08-24T17:21:41Z" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.524244 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.524288 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.524295 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.524314 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.524327 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.538699 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:36Z is after 
2025-08-24T17:21:41Z" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.542300 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.542340 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.542349 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.542367 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.542377 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.576681 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:36Z is after 
2025-08-24T17:21:41Z" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.583451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.583493 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.583503 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.583520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.583531 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.606871 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:36Z is after 
2025-08-24T17:21:41Z" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.610043 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.610071 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.610081 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.610112 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.610126 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.620824 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:36Z is after 
2025-08-24T17:21:41Z" Nov 11 13:30:36 crc kubenswrapper[4842]: E1111 13:30:36.620935 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.622130 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.622158 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.622168 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.622183 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.622193 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.727627 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.727770 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.727782 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.727799 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.727978 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.831053 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.831170 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.831195 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.831224 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.831246 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.934173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.934225 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.934237 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.934255 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:36 crc kubenswrapper[4842]: I1111 13:30:36.934268 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:36Z","lastTransitionTime":"2025-11-11T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.036784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.036835 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.036849 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.036868 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.036879 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.059117 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:37 crc kubenswrapper[4842]: E1111 13:30:37.059271 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.138759 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.138794 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.138803 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.138817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.138826 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.241643 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.241714 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.241722 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.241737 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.241749 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.344441 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.344493 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.344502 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.344519 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.344531 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.446995 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.447032 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.447041 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.447056 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.447065 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.550053 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.550094 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.550120 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.550137 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.550147 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.652340 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.652382 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.652395 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.652412 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.652423 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.754853 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.754902 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.754910 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.754925 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.754934 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.857606 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.857639 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.857650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.857664 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.857673 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.960149 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.960192 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.960201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.960217 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:37 crc kubenswrapper[4842]: I1111 13:30:37.960226 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:37Z","lastTransitionTime":"2025-11-11T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.058423 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.058484 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.058581 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:38 crc kubenswrapper[4842]: E1111 13:30:38.058778 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:38 crc kubenswrapper[4842]: E1111 13:30:38.058858 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:38 crc kubenswrapper[4842]: E1111 13:30:38.058952 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.062192 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.062243 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.062256 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.062272 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.062284 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.164628 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.164669 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.164698 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.164711 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.164738 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.267220 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.267263 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.267270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.267286 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.267296 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.369931 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.369977 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.369989 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.370008 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.370022 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.472175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.472225 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.472241 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.472264 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.472275 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.575160 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.575221 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.575235 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.575253 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.575265 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.677669 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.677722 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.677733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.677748 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.677759 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.780233 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.780284 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.780293 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.780309 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.780319 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.882709 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.882748 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.882757 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.882773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.882783 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.925494 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.946280 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://
fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"moun
tPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.959690 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.971654 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath
\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.984329 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.985350 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.985380 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.985388 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.985403 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.985415 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:38Z","lastTransitionTime":"2025-11-11T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:38 crc kubenswrapper[4842]: I1111 13:30:38.996638 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.014401 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.027204 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.036564 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.045370 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.058259 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:39 crc kubenswrapper[4842]: E1111 13:30:39.058374 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.061538 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.074265 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.085961 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.087358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 
crc kubenswrapper[4842]: I1111 13:30:39.087395 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.087407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.087424 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.087436 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.097541 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"na
me\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.106822 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.118566 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.128613 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:39Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.189604 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.189640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.189652 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.189666 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.189678 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.291931 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.291997 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.292019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.292038 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.292050 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.394984 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.395040 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.395051 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.395073 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.395087 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.498131 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.498182 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.498198 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.498222 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.498239 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.600672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.600719 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.600728 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.600744 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.600757 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.703275 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.703322 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.703333 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.703350 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.703364 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.712432 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:39 crc kubenswrapper[4842]: E1111 13:30:39.712644 4842 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:39 crc kubenswrapper[4842]: E1111 13:30:39.712791 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:30:55.712720132 +0000 UTC m=+66.373009791 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.806020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.806071 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.806085 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.806121 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.806135 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.909324 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.909371 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.909382 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.909403 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:39 crc kubenswrapper[4842]: I1111 13:30:39.909415 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:39Z","lastTransitionTime":"2025-11-11T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.011803 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.011836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.011845 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.011863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.011872 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.058313 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.058335 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:40 crc kubenswrapper[4842]: E1111 13:30:40.058693 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:40 crc kubenswrapper[4842]: E1111 13:30:40.058781 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.058616 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:40 crc kubenswrapper[4842]: E1111 13:30:40.058902 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.074770 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerI
D\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 
13:30:40.086047 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.097751 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.110024 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.114555 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.114698 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.114709 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.114727 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.114770 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.125650 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.140432 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.152317 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.163653 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.173920 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.191763 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.204122 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.215524 4842 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.218343 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.218369 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.218383 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.218398 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.218408 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.225199 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.235459 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.244542 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.253241 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.319950 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.319987 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 
13:30:40.319997 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.320014 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.320025 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.422580 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.422615 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.422624 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.422640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.422651 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.524606 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.524923 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.525141 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.525329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.525459 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.629212 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.629266 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.629280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.629301 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.629315 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.731601 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.731675 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.731700 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.731732 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.731755 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.834750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.834791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.834801 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.834822 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.834832 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.937151 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.937186 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.937201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.937217 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:40 crc kubenswrapper[4842]: I1111 13:30:40.937228 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:40Z","lastTransitionTime":"2025-11-11T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.040095 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.040175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.040191 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.040211 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.040224 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.058393 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.058542 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.059829 4842 scope.go:117] "RemoveContainer" containerID="2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.142961 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.143014 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.143026 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.143047 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.143057 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.245329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.245362 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.245371 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.245384 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.245393 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.348013 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.348060 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.348069 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.348083 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.348092 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.450270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.450318 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.450329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.450348 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.450360 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.477535 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/1.log" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.480742 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.481208 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.503790 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\
\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.515999 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.527675 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containe
rID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.538750 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.553244 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.553287 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.553299 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.553318 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.553330 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.554655 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.573200 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.586402 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.595357 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.607126 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.626131 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.638061 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.648908 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.655614 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.655646 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.655656 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.655671 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.655680 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.659048 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.671892 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.683864 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.695583 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.735008 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.735142 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.735171 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.735255 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.735305 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:31:13.735292552 +0000 UTC m=+84.395582171 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.735471 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:31:13.735462807 +0000 UTC m=+84.395752426 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.735647 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.735785 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:31:13.735767687 +0000 UTC m=+84.396057306 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.758090 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.758140 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.758149 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.758163 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.758172 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.836612 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.836664 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836792 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836808 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836818 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836841 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836875 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836887 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836861 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-11 13:31:13.836848965 +0000 UTC m=+84.497138584 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:41 crc kubenswrapper[4842]: E1111 13:30:41.836957 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-11 13:31:13.836940808 +0000 UTC m=+84.497230427 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.859801 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.860051 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.860064 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.860080 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.860091 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.961864 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.961896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.961904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.961918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:41 crc kubenswrapper[4842]: I1111 13:30:41.961927 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:41Z","lastTransitionTime":"2025-11-11T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.059363 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:42 crc kubenswrapper[4842]: E1111 13:30:42.059478 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.059541 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.059557 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:42 crc kubenswrapper[4842]: E1111 13:30:42.059779 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:42 crc kubenswrapper[4842]: E1111 13:30:42.059863 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.065055 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.065138 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.065163 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.065187 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.065205 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.166786 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.166815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.166825 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.166838 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.166847 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.270466 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.270527 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.270544 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.270568 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.270588 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.372979 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.373136 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.373174 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.373204 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.373221 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.475707 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.475813 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.475826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.475844 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.475861 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.484893 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/2.log" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.485413 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/1.log" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.487951 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736" exitCode=1 Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.488000 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.488051 4842 scope.go:117] "RemoveContainer" containerID="2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.488566 4842 scope.go:117] "RemoveContainer" containerID="88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736" Nov 11 13:30:42 crc kubenswrapper[4842]: E1111 13:30:42.488703 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.505627 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.517030 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.526647 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.539169 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.551884 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.563965 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.576736 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.578232 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.578285 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.578299 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.578317 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.578327 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.587457 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.598448 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.610523 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.622239 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.632369 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.642207 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.652487 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.661427 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.676031 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling 
webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servi
ceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:42Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.680825 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.680887 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.680910 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.680962 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.680986 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.783759 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.783806 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.783817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.783852 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.783862 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.886945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.887079 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.887139 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.887174 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.887199 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.989858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.989940 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.989962 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.989991 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:42 crc kubenswrapper[4842]: I1111 13:30:42.990017 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:42Z","lastTransitionTime":"2025-11-11T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.009353 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.025323 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.029218 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.038956 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.057672 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c25441c8439078f8f4ce7494d322094c98c09dae4e73d4961280e388a18d7c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"message\\\":\\\"ugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c openshift-network-operator/iptables-alerter-4ln5h openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt openshift-image-registry/node-ca-p8pll openshift-multus/network-metrics-daemon-hbtjv openshift-ovn-kubernetes/ovnkube-node-dzhjw]\\\\nI1111 13:30:24.559867 6319 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1111 13:30:24.559881 6319 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559891 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559901 6319 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw in node crc\\\\nI1111 13:30:24.559907 6319 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-dzhjw after 0 failed attempt(s)\\\\nI1111 13:30:24.559912 6319 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-dzhjw\\\\nI1111 13:30:24.559926 6319 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1111 13:30:24.559984 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling 
webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servi
ceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.058132 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:43 crc kubenswrapper[4842]: E1111 13:30:43.058249 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.069339 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.079637 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.089294 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.091802 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.091858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 
13:30:43.091871 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.091889 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.091900 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.101980 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.112923 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.123769 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.135876 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.145004 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.156787 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":
{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.169791 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.181819 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.194602 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.194640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.194650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.194627 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.194666 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.194820 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.207198 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\
\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.297599 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.297663 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.297679 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.297697 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.297709 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.399573 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.399637 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.399656 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.399679 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.399698 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.492374 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/2.log" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.495995 4842 scope.go:117] "RemoveContainer" containerID="88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736" Nov 11 13:30:43 crc kubenswrapper[4842]: E1111 13:30:43.496250 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.501719 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.501767 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.501783 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.501805 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.501820 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.515081 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.528808 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.542396 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.555949 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/
crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.569649 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.584368 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.598425 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.604052 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.604086 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.604117 4842 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.604133 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.604144 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.610819 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.625185 4842 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.645213 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.657561 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.667879 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.678582 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.689153 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.702378 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.707156 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.707188 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.707200 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.707217 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.707230 4842 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.713565 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.724540 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:43Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.810121 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.810161 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.810170 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.810186 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.810197 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.912934 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.912979 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.912990 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.913007 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:43 crc kubenswrapper[4842]: I1111 13:30:43.913017 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:43Z","lastTransitionTime":"2025-11-11T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.016127 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.016198 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.016228 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.016254 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.016270 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.058924 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.059010 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.059086 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:44 crc kubenswrapper[4842]: E1111 13:30:44.059284 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:44 crc kubenswrapper[4842]: E1111 13:30:44.059790 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:44 crc kubenswrapper[4842]: E1111 13:30:44.059925 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.119857 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.119986 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.120005 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.120032 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.120079 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.223856 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.223916 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.223928 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.223950 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.223963 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.326368 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.326416 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.326431 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.326451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.326466 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.429726 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.429762 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.429772 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.429787 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.429797 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.532747 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.532785 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.532797 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.532815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.532830 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.636701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.636890 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.636968 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.637216 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.637264 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.742455 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.742504 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.742519 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.742545 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.742562 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.845567 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.845605 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.845617 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.845634 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.845647 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.948814 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.948876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.948888 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.948906 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:44 crc kubenswrapper[4842]: I1111 13:30:44.948918 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:44Z","lastTransitionTime":"2025-11-11T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.052490 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.052557 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.052578 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.052618 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.052651 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.058875 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:45 crc kubenswrapper[4842]: E1111 13:30:45.059144 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.154884 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.154932 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.154946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.154969 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.154985 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.257901 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.257949 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.257964 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.257985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.257999 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.361733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.361821 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.361839 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.361871 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.361890 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.465582 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.465651 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.465670 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.465696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.465714 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.568892 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.568946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.568959 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.568979 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.568995 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.672481 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.672550 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.672566 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.672589 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.672602 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.775750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.775804 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.775815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.775836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.775848 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.879057 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.879598 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.879620 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.879644 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.879658 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.981912 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.981966 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.981984 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.982004 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:45 crc kubenswrapper[4842]: I1111 13:30:45.982016 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:45Z","lastTransitionTime":"2025-11-11T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.059067 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.059238 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.059273 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.059362 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.059459 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.059679 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.083876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.083944 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.083957 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.083985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.084001 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.185999 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.186063 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.186075 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.186128 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.186145 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.289765 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.289837 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.289855 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.289883 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.289909 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.393188 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.393247 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.393261 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.393283 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.393299 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.496329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.496382 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.496400 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.496428 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.496441 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.600647 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.600721 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.600737 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.600760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.600774 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.626910 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.626966 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.626982 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.627002 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.627020 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.643612 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:46Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.655446 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.655512 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.655528 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.655554 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.655567 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.673932 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:46Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.679154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.679257 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.679280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.679328 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.679352 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.695164 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:46Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.699695 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.699731 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.699764 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.699785 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.699799 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.713512 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:46Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.717821 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.717892 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.717909 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.717930 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.717965 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.737736 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:46Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:46 crc kubenswrapper[4842]: E1111 13:30:46.737857 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.739920 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.739949 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.739959 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.739991 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.740001 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.842564 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.842615 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.842631 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.842653 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.842669 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.945702 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.945741 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.945750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.945768 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:46 crc kubenswrapper[4842]: I1111 13:30:46.945776 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:46Z","lastTransitionTime":"2025-11-11T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.048403 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.048761 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.048774 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.048809 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.048822 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.058265 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:47 crc kubenswrapper[4842]: E1111 13:30:47.058402 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.155789 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.155863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.155880 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.155904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.155950 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.259210 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.259242 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.259250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.259280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.259292 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.361327 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.361421 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.361447 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.361480 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.361514 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.464461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.464509 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.464520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.464534 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.464544 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.568206 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.568253 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.568266 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.568314 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.568324 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.672137 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.672200 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.672214 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.672231 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.672243 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.774640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.774682 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.774705 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.774728 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.774742 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.877038 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.877554 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.877562 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.877583 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.877592 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.980092 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.980165 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.980176 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.980192 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:47 crc kubenswrapper[4842]: I1111 13:30:47.980219 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:47Z","lastTransitionTime":"2025-11-11T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.059064 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.059174 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:48 crc kubenswrapper[4842]: E1111 13:30:48.059216 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.059243 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:48 crc kubenswrapper[4842]: E1111 13:30:48.059342 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:48 crc kubenswrapper[4842]: E1111 13:30:48.059487 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.083387 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.083426 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.083438 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.083459 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.083471 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.186793 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.186842 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.186853 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.186878 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.186890 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.289569 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.289637 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.289649 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.289669 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.289684 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.392672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.392728 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.392742 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.392779 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.392799 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.497726 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.497761 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.497769 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.497791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.497801 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.600561 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.600601 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.600610 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.600625 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.600637 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.702889 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.702935 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.702946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.702969 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.702982 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.805314 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.805368 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.805379 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.805422 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.805434 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.908065 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.908122 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.908133 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.908150 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:48 crc kubenswrapper[4842]: I1111 13:30:48.908161 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:48Z","lastTransitionTime":"2025-11-11T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.013078 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.013241 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.013443 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.013458 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.013467 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.058556 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:49 crc kubenswrapper[4842]: E1111 13:30:49.058664 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.114986 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.115024 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.115036 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.115052 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.115063 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.217382 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.217422 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.217430 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.217444 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.217453 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.321077 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.321143 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.321157 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.321175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.321187 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.424836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.424902 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.424926 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.424955 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.424976 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.528209 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.528312 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.528331 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.528358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.528382 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.630893 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.630936 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.630945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.630960 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.630970 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.733581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.733619 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.733632 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.733650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.733662 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.836376 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.836407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.836415 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.836427 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.836436 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.938816 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.938854 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.938864 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.938878 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:49 crc kubenswrapper[4842]: I1111 13:30:49.938888 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:49Z","lastTransitionTime":"2025-11-11T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.041458 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.041494 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.041504 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.041518 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.041530 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.057970 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.057970 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:50 crc kubenswrapper[4842]: E1111 13:30:50.058095 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:50 crc kubenswrapper[4842]: E1111 13:30:50.058322 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.059243 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:50 crc kubenswrapper[4842]: E1111 13:30:50.059435 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.076774 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver
-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.090675 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.104967 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.117667 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.128571 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.137910 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.144717 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.144775 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.144786 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.144800 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.144810 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.146948 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.159895 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.170790 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 
13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.183161 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.195016 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.207718 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.224387 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.243247 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.247796 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.247860 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.247872 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.247887 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.247897 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.259461 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.273475 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.294786 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.351699 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.351749 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.351758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.351777 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.351790 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.455610 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.455922 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.455992 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.456073 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.456205 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.558498 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.558847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.558863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.558884 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.558900 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.661458 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.661702 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.661772 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.661839 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.661898 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.763743 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.763785 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.763797 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.763815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.763826 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.866627 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.866669 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.866682 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.866701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.866714 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.970188 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.971205 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.971237 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.971261 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:50 crc kubenswrapper[4842]: I1111 13:30:50.971277 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:50Z","lastTransitionTime":"2025-11-11T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.058510 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:51 crc kubenswrapper[4842]: E1111 13:30:51.058623 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.073403 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.073446 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.073457 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.073474 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.073483 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.179474 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.179943 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.180068 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.180212 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.180323 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.282919 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.282961 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.282974 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.282993 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.283009 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.386141 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.386180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.386192 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.386211 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.386223 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.489015 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.489291 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.489376 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.489452 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.489521 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.591794 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.591839 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.591850 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.591868 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.591879 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.695001 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.695041 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.695053 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.695070 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.695081 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.797122 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.797154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.797163 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.797178 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.797188 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.899283 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.899555 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.899664 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.899735 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:51 crc kubenswrapper[4842]: I1111 13:30:51.899795 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:51Z","lastTransitionTime":"2025-11-11T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.002591 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.002632 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.002641 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.002655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.002666 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.059417 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.059494 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.059531 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:52 crc kubenswrapper[4842]: E1111 13:30:52.059564 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:52 crc kubenswrapper[4842]: E1111 13:30:52.059697 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:52 crc kubenswrapper[4842]: E1111 13:30:52.059793 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.104519 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.104562 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.104574 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.104591 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.104603 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.207377 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.207413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.207430 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.207445 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.207455 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.309738 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.309954 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.310058 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.310162 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.310240 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.413208 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.413250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.413263 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.413279 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.413290 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.516219 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.516270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.516286 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.516312 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.516346 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.618275 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.618318 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.618332 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.618349 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.618359 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.720670 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.720702 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.720710 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.720723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.720732 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.823706 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.823759 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.823781 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.823810 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.823832 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.926166 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.926201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.926209 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.926222 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:52 crc kubenswrapper[4842]: I1111 13:30:52.926231 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:52Z","lastTransitionTime":"2025-11-11T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.029384 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.029436 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.029451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.029471 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.029484 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.058331 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:53 crc kubenswrapper[4842]: E1111 13:30:53.058758 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.132774 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.132818 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.132827 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.132842 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.132851 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.235340 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.235389 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.235398 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.235412 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.235421 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.338348 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.338592 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.338658 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.338728 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.338793 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.441464 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.441547 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.441568 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.442001 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.442652 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.544896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.544925 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.544935 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.544949 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.544960 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.646827 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.646852 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.646860 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.646872 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.646880 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.749629 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.749677 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.749688 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.749701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.749710 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.853050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.853087 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.853112 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.853131 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.853143 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.956074 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.956451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.956544 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.956619 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:53 crc kubenswrapper[4842]: I1111 13:30:53.956689 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:53Z","lastTransitionTime":"2025-11-11T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.058056 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:54 crc kubenswrapper[4842]: E1111 13:30:54.058231 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.058291 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:54 crc kubenswrapper[4842]: E1111 13:30:54.058420 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.058791 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:54 crc kubenswrapper[4842]: E1111 13:30:54.058986 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.059122 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.059148 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.059163 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.059182 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.059197 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.162020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.162058 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.162070 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.162084 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.162109 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.265290 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.265371 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.265394 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.265850 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.266233 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.370334 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.370396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.370413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.370432 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.370447 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.472377 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.472448 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.472461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.472481 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.472493 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.574112 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.574159 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.574171 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.574185 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.574196 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.676615 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.676647 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.676654 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.676668 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.676677 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.778895 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.778924 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.778932 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.778945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.778955 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.880737 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.880783 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.880795 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.880810 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.880821 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.983021 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.983064 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.983072 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.983087 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:54 crc kubenswrapper[4842]: I1111 13:30:54.983111 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:54Z","lastTransitionTime":"2025-11-11T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.058347 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:55 crc kubenswrapper[4842]: E1111 13:30:55.058469 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.082915 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.084584 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.084617 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.084626 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.084639 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.084647 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.187180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.187446 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.187521 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.187597 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.187657 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.289840 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.289867 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.289875 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.289889 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.289897 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.393305 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.393377 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.393389 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.393427 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.393443 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.495876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.495929 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.495940 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.495957 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.495970 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.598042 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.598076 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.598084 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.598118 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.598127 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.701256 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.701308 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.701322 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.701342 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.701354 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.784664 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:55 crc kubenswrapper[4842]: E1111 13:30:55.784812 4842 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:55 crc kubenswrapper[4842]: E1111 13:30:55.784900 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:31:27.784876187 +0000 UTC m=+98.445165906 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.804516 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.804556 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.804565 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.804581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.804592 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.907059 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.907112 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.907151 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.907167 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:55 crc kubenswrapper[4842]: I1111 13:30:55.907185 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:55Z","lastTransitionTime":"2025-11-11T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.009770 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.009819 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.009831 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.009849 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.009860 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.058448 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.058546 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.058590 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.058678 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.058455 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.058769 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.059709 4842 scope.go:117] "RemoveContainer" containerID="88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736" Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.059906 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.112074 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.112151 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.112164 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.112179 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.112191 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.214902 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.214949 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.214960 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.214976 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.214988 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.317664 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.317716 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.317734 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.317759 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.317777 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.420479 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.420526 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.420536 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.420552 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.420563 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.523957 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.523999 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.524010 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.524024 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.524033 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.625982 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.626028 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.626039 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.626055 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.626065 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.728575 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.728627 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.728639 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.728655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.728668 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.830417 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.830461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.830469 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.830482 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.830494 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.892249 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.892286 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.892295 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.892309 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.892319 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.905660 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:56Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.909211 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.909434 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.909529 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.909608 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.909712 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.921713 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:56Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.929041 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.929091 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.929125 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.929147 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.929163 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.945192 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:56Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.949064 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.949119 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.949131 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.949150 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.949162 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.965818 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:56Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.969266 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.969296 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.969307 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.969321 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.969331 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.980074 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:56Z is after 2025-08-24T17:21:41Z" Nov 11 13:30:56 crc kubenswrapper[4842]: E1111 13:30:56.980214 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.981331 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.981435 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.981501 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.981586 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:56 crc kubenswrapper[4842]: I1111 13:30:56.981667 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:56Z","lastTransitionTime":"2025-11-11T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.059059 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:57 crc kubenswrapper[4842]: E1111 13:30:57.059213 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.083947 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.083985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.083996 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.084010 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.084022 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.186355 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.186625 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.186667 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.186702 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.186726 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.288912 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.288961 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.288969 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.289008 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.289018 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.391975 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.392019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.392030 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.392047 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.392056 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.494205 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.494239 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.494250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.494265 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.494276 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.597494 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.597544 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.597555 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.597570 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.597579 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.700249 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.700294 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.700303 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.700318 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.700329 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.803799 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.803850 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.803863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.803885 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.803898 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.907608 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.907655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.907665 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.907682 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:57 crc kubenswrapper[4842]: I1111 13:30:57.907693 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:57Z","lastTransitionTime":"2025-11-11T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.009812 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.009855 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.009863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.009878 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.009887 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.058326 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.058397 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.058478 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:30:58 crc kubenswrapper[4842]: E1111 13:30:58.058512 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:30:58 crc kubenswrapper[4842]: E1111 13:30:58.058677 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:30:58 crc kubenswrapper[4842]: E1111 13:30:58.058804 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.111755 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.111816 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.111829 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.111846 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.111858 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.214349 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.214405 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.214417 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.214435 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.214446 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.316036 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.316173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.316195 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.316215 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.316228 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.418258 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.418293 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.418302 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.418317 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.418327 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.520206 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.520249 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.520265 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.520280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.520292 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.623966 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.624056 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.624075 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.624142 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.624161 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.725735 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.725765 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.725774 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.725787 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.725796 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.828269 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.828298 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.828308 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.828323 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.828336 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.930928 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.930970 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.930982 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.931000 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:58 crc kubenswrapper[4842]: I1111 13:30:58.931017 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:58Z","lastTransitionTime":"2025-11-11T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.033907 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.033946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.033959 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.033975 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.033986 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.058446 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:30:59 crc kubenswrapper[4842]: E1111 13:30:59.058567 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.136420 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.136451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.136459 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.136473 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.136482 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.238845 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.238882 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.238892 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.238905 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.238915 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.341062 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.341126 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.341140 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.341157 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.341166 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.443555 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.443603 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.443612 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.443626 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.443636 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.545030 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.545075 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.545087 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.545124 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.545138 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.646788 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.646826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.646838 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.646854 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.646865 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.748797 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.748838 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.748850 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.748863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.748874 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.850923 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.850961 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.850972 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.850989 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.851000 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.953507 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.953543 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.953554 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.953569 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:30:59 crc kubenswrapper[4842]: I1111 13:30:59.953580 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:30:59Z","lastTransitionTime":"2025-11-11T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.056440 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.056490 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.056501 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.056517 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.056531 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.059024 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:00 crc kubenswrapper[4842]: E1111 13:31:00.059151 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.059296 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:00 crc kubenswrapper[4842]: E1111 13:31:00.059364 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.059594 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:00 crc kubenswrapper[4842]: E1111 13:31:00.059655 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.073675 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242
b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.086742 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.099500 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.122029 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.141238 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.158551 4842 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containe
rID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.159602 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.159633 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.159653 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.159705 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.159717 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.176575 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.189412 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.205068 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.219679 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.234452 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.249402 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disab
led\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc 
kubenswrapper[4842]: I1111 13:31:00.262114 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.262158 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.262166 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.262181 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.262190 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.262398 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2
\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.277442 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.291479 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.308129 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.321184 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.336664 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.364548 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.364595 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.364603 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.364616 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.364625 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.466735 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.466769 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.466778 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.466791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.466799 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.568758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.568793 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.568801 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.568815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.568824 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.671327 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.671360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.671375 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.671390 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.671400 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.773809 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.773864 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.773880 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.773898 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.773909 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.875439 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.875472 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.875483 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.875499 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.875509 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.977400 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.977435 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.977445 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.977460 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:00 crc kubenswrapper[4842]: I1111 13:31:00.977471 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:00Z","lastTransitionTime":"2025-11-11T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.058349 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:01 crc kubenswrapper[4842]: E1111 13:31:01.058469 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.079379 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.079405 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.079413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.079425 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.079432 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.181301 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.181346 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.181357 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.181373 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.181384 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.284013 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.284062 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.284072 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.284120 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.284134 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.386835 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.386878 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.386889 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.386907 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.386919 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.488893 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.488944 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.488956 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.488972 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.488984 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.551079 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/0.log" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.551150 4842 generic.go:334] "Generic (PLEG): container finished" podID="a899ee4d-e1d3-44cc-a780-2dac60da21eb" containerID="2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c" exitCode=1 Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.551182 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerDied","Data":"2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.551626 4842 scope.go:117] "RemoveContainer" containerID="2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.564388 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.576954 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.588076 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.591264 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 
crc kubenswrapper[4842]: I1111 13:31:01.591290 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.591300 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.591317 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.591329 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.598038 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"na
me\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.608245 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.624834 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.634977 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.649887 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f
8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\
\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\"
,\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.662442 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef
0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.673546 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.688210 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:01Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:01Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness 
Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.693793 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.693954 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.694020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.694089 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.694199 4842 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.699861 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.716768 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.728528 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.748620 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.758860 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1
714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.768968 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\
"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.777089 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:01Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.797018 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.797074 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 
13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.797089 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.797127 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.797141 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.899739 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.899788 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.899800 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.899817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:01 crc kubenswrapper[4842]: I1111 13:31:01.899835 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:01Z","lastTransitionTime":"2025-11-11T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.002159 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.002207 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.002218 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.002235 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.002245 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.059247 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.059285 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:02 crc kubenswrapper[4842]: E1111 13:31:02.059404 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.059436 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:02 crc kubenswrapper[4842]: E1111 13:31:02.059516 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:02 crc kubenswrapper[4842]: E1111 13:31:02.059603 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.104330 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.104381 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.104396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.104413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.104428 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.207596 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.207642 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.207655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.207673 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.207684 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.309701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.309753 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.309772 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.309788 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.309799 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.411830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.411856 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.411864 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.411876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.411884 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.513967 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.514016 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.514027 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.514043 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.514054 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.555904 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/0.log" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.555959 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerStarted","Data":"969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.569790 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.584308 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.598852 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.614157 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.616643 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.616691 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.616703 4842 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.616721 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.616734 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.632871 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.646898 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.660484 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.674194 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.692648 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.705157 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.715797 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.719139 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.719179 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.719187 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.719201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.719211 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.726269 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.740153 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.751338 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.761930 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.774383 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.786816 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.795928 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:02Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.821731 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.821820 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.821833 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.821849 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.821859 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.924251 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.924312 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.924327 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.924344 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:02 crc kubenswrapper[4842]: I1111 13:31:02.924357 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:02Z","lastTransitionTime":"2025-11-11T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.026746 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.026807 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.026827 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.026849 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.026875 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.058640 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:03 crc kubenswrapper[4842]: E1111 13:31:03.058872 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.129748 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.130012 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.130082 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.130164 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.130237 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.232819 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.232857 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.232867 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.232881 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.232892 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.334984 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.335025 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.335035 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.335051 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.335062 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.440003 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.440048 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.440061 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.440075 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.440089 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.542789 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.542830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.542841 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.542858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.542871 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.645248 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.645298 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.645309 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.645325 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.645337 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.747585 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.747629 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.747638 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.747655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.747665 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.850600 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.850679 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.850703 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.850728 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.850744 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.953757 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.953797 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.953806 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.953819 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:03 crc kubenswrapper[4842]: I1111 13:31:03.953828 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:03Z","lastTransitionTime":"2025-11-11T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.056180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.056220 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.056232 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.056270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.056287 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.058707 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:04 crc kubenswrapper[4842]: E1111 13:31:04.058814 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.058990 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:04 crc kubenswrapper[4842]: E1111 13:31:04.059066 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.059370 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:04 crc kubenswrapper[4842]: E1111 13:31:04.059559 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.159513 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.159609 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.159634 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.159677 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.159707 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.261892 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.261949 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.261960 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.261979 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.261991 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.365130 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.365214 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.365240 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.365274 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.365300 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.468701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.468833 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.468863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.468908 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.468937 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.572277 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.572328 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.572342 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.572360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.572372 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.676423 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.676479 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.676503 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.676531 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.676553 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.779554 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.779616 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.779632 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.779656 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.779684 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.882873 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.882918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.882949 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.882968 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.882981 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.985784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.985840 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.985851 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.985870 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:04 crc kubenswrapper[4842]: I1111 13:31:04.985880 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:04Z","lastTransitionTime":"2025-11-11T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.058830 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:05 crc kubenswrapper[4842]: E1111 13:31:05.059002 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.088223 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.088303 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.088314 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.088334 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.088346 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.190865 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.190909 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.190924 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.190940 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.190952 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.293941 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.293984 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.293997 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.294011 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.294021 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.396269 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.396333 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.396355 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.396383 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.396404 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.499696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.499743 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.499752 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.499766 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.499775 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.602287 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.602345 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.602362 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.602383 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.602399 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.704903 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.704952 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.704961 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.704976 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.704986 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.808246 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.808315 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.808337 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.808370 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.808393 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.911019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.911053 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.911063 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.911076 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:05 crc kubenswrapper[4842]: I1111 13:31:05.911085 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:05Z","lastTransitionTime":"2025-11-11T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.013236 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.013277 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.013288 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.013303 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.013316 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.058061 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:06 crc kubenswrapper[4842]: E1111 13:31:06.058190 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.058276 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.058330 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:06 crc kubenswrapper[4842]: E1111 13:31:06.058421 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:06 crc kubenswrapper[4842]: E1111 13:31:06.058662 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.116259 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.116305 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.116313 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.116331 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.116341 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.220261 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.220330 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.220342 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.220361 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.220372 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.322233 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.322260 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.322268 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.322280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.322288 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.427248 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.427318 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.427370 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.427409 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.427426 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.530405 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.530442 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.530452 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.530466 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.530477 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.633055 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.633124 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.633135 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.633148 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.633156 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.736029 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.736119 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.736144 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.736173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.736191 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.838310 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.838340 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.838348 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.838362 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.838372 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.941613 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.941663 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.941674 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.941693 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:06 crc kubenswrapper[4842]: I1111 13:31:06.941706 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:06Z","lastTransitionTime":"2025-11-11T13:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.046979 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.048139 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.048201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.048239 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.048259 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.058356 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:07 crc kubenswrapper[4842]: E1111 13:31:07.059160 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.059590 4842 scope.go:117] "RemoveContainer" containerID="88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.151286 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.151516 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.151681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.151835 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.151955 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.255050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.255137 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.255152 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.255175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.255193 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.262918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.262984 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.263001 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.263027 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.263047 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: E1111 13:31:07.279392 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.284479 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.284521 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.284535 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.284748 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.284763 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: E1111 13:31:07.301555 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.306039 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.306153 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.306175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.306237 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.306263 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: E1111 13:31:07.322440 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.326626 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.326660 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.326670 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.326688 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.326699 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: E1111 13:31:07.343812 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.348006 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.348071 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.348089 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.348136 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.348154 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: E1111 13:31:07.362280 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: E1111 13:31:07.362410 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.364351 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.364433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.364449 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.364478 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.364498 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.468130 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.468179 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.468189 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.468217 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.468236 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.575991 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.576140 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.576165 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.576231 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.576249 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.580290 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/2.log" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.586312 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.587423 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.608089 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.626220 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.644918 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.663313 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.679252 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.679331 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.679343 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.679365 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.679387 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.682052 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\
\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.694404 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.705164 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.725475 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for 
ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.736808 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.754254 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.770172 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.782623 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.782683 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.782694 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.782717 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.782733 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.789132 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.805569 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.823052 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.838878 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.854800 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.869789 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 
13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.886176 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.886261 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.886278 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.886301 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.886316 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.887792 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\
\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:07Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.988809 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.988857 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.988867 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.988885 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:07 crc kubenswrapper[4842]: I1111 13:31:07.988895 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:07Z","lastTransitionTime":"2025-11-11T13:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.058861 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.058861 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:08 crc kubenswrapper[4842]: E1111 13:31:08.059031 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:08 crc kubenswrapper[4842]: E1111 13:31:08.059166 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.059388 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:08 crc kubenswrapper[4842]: E1111 13:31:08.059465 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.091339 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.091376 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.091390 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.091410 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.091424 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.195564 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.195655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.195681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.195716 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.195740 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.298337 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.298406 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.298424 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.298452 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.298472 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.402230 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.402282 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.402295 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.402315 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.402328 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.505923 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.505978 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.505989 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.506007 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.506020 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.592925 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/3.log" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.593678 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/2.log" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.597479 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" exitCode=1 Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.597542 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.597609 4842 scope.go:117] "RemoveContainer" containerID="88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.599821 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:31:08 crc kubenswrapper[4842]: E1111 13:31:08.600301 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.608650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.608759 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.608782 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.608961 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.609078 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.618903 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.633023 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.647850 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.668155 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.680976 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.700560 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88fbe536b353b09b1d88c193242a5a521582aa31e48d2df28928751e14641736\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:30:41Z\\\",\\\"message\\\":\\\"ns:[]Condition{},},}\\\\nI1111 13:30:41.849708 6573 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-hbtjv] creating logical port openshift-multus_network-metrics-daemon-hbtjv for pod on switch crc\\\\nI1111 13:30:41.849715 6573 lb_config.go:1031] Cluster endpoints for openshift-marketplace/redhat-operators for network=default are: map[]\\\\nF1111 13:30:41.849726 6573 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:30:41Z is after 2025-08-24T17:21:41Z]\\\\nI1111 13:30:41.849731 6573 services_controller.go:443] Built service openshift-marketplace/redhat-operators LB cluster-wide configs for ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:08Z\\\",\\\"message\\\":\\\"SNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.139\\\\\\\", 
Port:17698, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1111 13:31:08.047623 6918 services_controller.go:452] Built service openshift-ingress-canary/ingress-canary per-node LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047636 6918 services_controller.go:453] Built service openshift-ingress-canary/ingress-canary template LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047616 6918 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1111 13:31:08.047202 6918 services_controller.go:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.711931 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.712213 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.712226 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.712244 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.712255 4842 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.712572 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.727211 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.742213 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.757664 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.775414 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.788445 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.807797 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 
13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.814986 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.815020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.815031 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.815050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.815065 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.822916 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.843781 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.861174 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.882559 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f
8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\
\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\"
,\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.896495 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef
0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:08Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.917414 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.917453 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.917463 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.917480 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:08 crc kubenswrapper[4842]: I1111 13:31:08.917491 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:08Z","lastTransitionTime":"2025-11-11T13:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.021211 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.021284 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.021309 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.021341 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.021363 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.058018 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:09 crc kubenswrapper[4842]: E1111 13:31:09.058162 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.124677 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.124746 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.124762 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.124786 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.124803 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.227115 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.227145 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.227153 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.227167 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.227175 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.329733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.329803 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.329815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.329836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.329851 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.433143 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.433188 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.433203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.433225 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.433241 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.535032 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.535124 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.535143 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.535166 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.535184 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.604898 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/3.log" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.610932 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:31:09 crc kubenswrapper[4842]: E1111 13:31:09.611342 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.621887 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.637835 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.638494 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.638535 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.638548 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.638596 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.638610 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.660259 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:08Z\\\",\\\"message\\\":\\\"SNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.139\\\\\\\", Port:17698, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1111 13:31:08.047623 6918 services_controller.go:452] Built service openshift-ingress-canary/ingress-canary per-node LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047636 6918 services_controller.go:453] Built service openshift-ingress-canary/ingress-canary template LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047616 6918 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1111 13:31:08.047202 6918 
services_controller.go:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:31:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.671243 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.682578 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.692703 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.702324 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.713377 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.725163 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 
13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.735409 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.742281 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.742354 4842 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.742387 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.742403 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.742414 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.748422 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints 
registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.763251 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 
13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.776184 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.788205 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.801503 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.821955 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.833041 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.844344 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.844367 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.844375 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.844387 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.844395 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.844337 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\
\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:09Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.946437 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.946481 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.946499 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.946518 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:09 crc kubenswrapper[4842]: I1111 13:31:09.946535 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:09Z","lastTransitionTime":"2025-11-11T13:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.049011 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.049044 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.049052 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.049067 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.049076 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.058476 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:10 crc kubenswrapper[4842]: E1111 13:31:10.058576 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.058717 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:10 crc kubenswrapper[4842]: E1111 13:31:10.058772 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.058949 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:10 crc kubenswrapper[4842]: E1111 13:31:10.058992 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.074267 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.086562 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.100764 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.112408 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.123698 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.133622 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.144300 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.151268 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.151300 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.151311 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.151327 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.151339 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.154371 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.170627 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:08Z\\\",\\\"message\\\":\\\"SNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.139\\\\\\\", Port:17698, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1111 13:31:08.047623 6918 services_controller.go:452] Built service openshift-ingress-canary/ingress-canary per-node LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047636 6918 services_controller.go:453] Built service openshift-ingress-canary/ingress-canary template LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047616 6918 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1111 13:31:08.047202 6918 services_controller.go:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:31:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.182723 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.195740 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.207853 4842 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 
13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.222241 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.237311 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.250696 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.253609 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.253646 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.253658 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.253672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.253682 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.264398 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.279293 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.290311 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:10Z is after 2025-08-24T17:21:41Z" Nov 11 
13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.356281 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.356319 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.356332 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.356346 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.356356 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.459434 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.459463 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.459474 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.459490 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.459501 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.563016 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.563451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.563469 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.563495 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.563520 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.665209 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.665245 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.665253 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.665267 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.665276 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.767812 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.767839 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.767847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.767858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.767865 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.870936 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.870985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.870996 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.871013 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.871023 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.973963 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.973999 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.974008 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.974024 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:10 crc kubenswrapper[4842]: I1111 13:31:10.974033 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:10Z","lastTransitionTime":"2025-11-11T13:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.058619 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:11 crc kubenswrapper[4842]: E1111 13:31:11.058770 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.076130 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.076165 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.076177 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.076193 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.076204 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.178802 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.178847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.178861 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.178882 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.178897 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.280750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.280784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.280793 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.280807 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.280815 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.382817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.382849 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.382858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.382870 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.382879 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.486270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.486318 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.486334 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.486358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.486408 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.588091 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.588150 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.588160 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.588172 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.588181 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.690811 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.690878 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.690897 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.690922 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.690939 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.793638 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.793683 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.793696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.793711 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.793722 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.896689 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.896740 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.896750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.896766 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:11 crc kubenswrapper[4842]: I1111 13:31:11.896777 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:11.999587 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:11.999638 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:11.999647 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:11.999672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:11.999685 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:11Z","lastTransitionTime":"2025-11-11T13:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.058225 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.058236 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:12 crc kubenswrapper[4842]: E1111 13:31:12.058464 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:12 crc kubenswrapper[4842]: E1111 13:31:12.058552 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.058267 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:12 crc kubenswrapper[4842]: E1111 13:31:12.058638 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.102374 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.102431 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.102450 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.102477 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.102492 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.204854 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.204896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.204912 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.204928 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.204943 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.307188 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.307440 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.307509 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.307582 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.307652 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.409554 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.409602 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.409612 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.409626 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.409635 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.511765 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.511802 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.511811 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.511826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.511836 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.614126 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.614175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.614184 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.614197 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.614206 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.716517 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.716549 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.716558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.716571 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.716580 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.818653 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.818940 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.819018 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.819123 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.819219 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.922735 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.922817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.922836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.922870 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:12 crc kubenswrapper[4842]: I1111 13:31:12.922893 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:12Z","lastTransitionTime":"2025-11-11T13:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.026046 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.026159 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.026173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.026200 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.026214 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.058854 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.059372 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.128506 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.128561 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.128577 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.128598 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.128608 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.232320 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.232413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.232442 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.232481 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.232506 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.336488 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.336572 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.336592 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.336622 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.336649 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.439943 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.440000 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.440011 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.440032 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.440043 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.543990 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.544050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.544065 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.544092 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.544132 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.646926 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.647000 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.647019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.647052 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.647076 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.749896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.749943 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.749956 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.749971 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.749981 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.780631 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.780827 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.780797458 +0000 UTC m=+148.441087077 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.780922 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.780960 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.781073 4842 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.781191 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.78117198 +0000 UTC m=+148.441461599 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.781196 4842 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.781289 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.781268173 +0000 UTC m=+148.441557792 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.852781 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.852834 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.852843 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.852861 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.852871 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.882269 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.882421 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.882537 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.882576 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.882631 4842 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.882719 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 
13:31:13.882773 4842 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.882801 4842 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.882726 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.882701826 +0000 UTC m=+148.542991455 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:31:13 crc kubenswrapper[4842]: E1111 13:31:13.882938 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.882894992 +0000 UTC m=+148.543184671 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.956371 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.956410 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.956424 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.956441 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:13 crc kubenswrapper[4842]: I1111 13:31:13.956452 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:13Z","lastTransitionTime":"2025-11-11T13:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.058447 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:14 crc kubenswrapper[4842]: E1111 13:31:14.058568 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.058448 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.058737 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:14 crc kubenswrapper[4842]: E1111 13:31:14.058795 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:14 crc kubenswrapper[4842]: E1111 13:31:14.058990 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.060060 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.060077 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.060085 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.060097 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.060120 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.162134 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.162173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.162185 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.162201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.162212 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.264818 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.264864 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.264875 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.264890 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.264901 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.367875 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.367918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.367929 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.367948 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.367961 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.471956 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.472004 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.472019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.472036 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.472046 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.574138 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.574178 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.574187 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.574201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.574211 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.676442 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.676489 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.676504 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.676558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.676576 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.778681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.778725 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.778733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.778747 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.778757 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.881232 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.881265 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.881272 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.881285 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.881293 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.983723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.983777 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.983788 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.983804 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:14 crc kubenswrapper[4842]: I1111 13:31:14.983815 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:14Z","lastTransitionTime":"2025-11-11T13:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.058806 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:15 crc kubenswrapper[4842]: E1111 13:31:15.058947 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.086356 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.086401 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.086421 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.086441 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.086498 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.189075 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.189179 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.189202 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.189227 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.189245 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.292319 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.292537 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.292569 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.292618 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.292637 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.395642 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.395700 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.395711 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.395732 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.395745 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.498160 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.498204 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.498219 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.498239 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.498257 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.600356 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.600411 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.600433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.600453 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.600468 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.702353 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.702450 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.702463 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.702482 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.702502 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.805040 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.805141 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.805161 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.805183 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.805197 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.907791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.908041 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.908054 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.908068 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:15 crc kubenswrapper[4842]: I1111 13:31:15.908080 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:15Z","lastTransitionTime":"2025-11-11T13:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.010455 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.010730 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.010815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.010890 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.010981 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.058036 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:16 crc kubenswrapper[4842]: E1111 13:31:16.058198 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.058036 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:16 crc kubenswrapper[4842]: E1111 13:31:16.058320 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.058465 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:16 crc kubenswrapper[4842]: E1111 13:31:16.058529 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.112771 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.112807 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.112815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.112829 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.112838 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.214621 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.214649 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.214659 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.214671 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.214680 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.316973 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.317026 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.317049 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.317078 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.317264 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.419719 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.420170 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.420315 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.420445 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.420577 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.523368 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.523403 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.523413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.523429 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.523441 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.626543 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.627156 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.627251 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.627355 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.627438 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.730540 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.730849 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.730985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.731157 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.731266 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.834480 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.834517 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.834528 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.834544 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.834557 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.937427 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.937494 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.937520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.937551 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:16 crc kubenswrapper[4842]: I1111 13:31:16.937573 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:16Z","lastTransitionTime":"2025-11-11T13:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.041035 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.041134 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.041154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.041177 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.041193 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.058373 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:17 crc kubenswrapper[4842]: E1111 13:31:17.058588 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.144442 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.144495 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.144512 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.144535 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.144553 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.246712 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.246738 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.246746 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.246758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.246766 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.348701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.348738 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.348747 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.348760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.348772 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.451563 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.451612 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.451633 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.451655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.451672 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.555337 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.555385 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.555396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.555421 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.555435 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.579453 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.579523 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.579535 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.579558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.579574 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: E1111 13:31:17.595498 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.599933 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.600060 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.600085 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.600147 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.600171 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: E1111 13:31:17.613512 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.617158 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.617207 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.617218 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.617237 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.617252 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: E1111 13:31:17.629049 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.632278 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.632334 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.632348 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.632473 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.632493 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: E1111 13:31:17.645563 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.649678 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.649719 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.649731 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.649750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.649762 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: E1111 13:31:17.660611 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:17Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:17 crc kubenswrapper[4842]: E1111 13:31:17.660749 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.662727 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.662760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.662771 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.662785 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.662795 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.765493 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.765542 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.765553 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.765569 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.765579 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.868206 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.868248 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.868256 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.868272 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.868282 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.970186 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.970230 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.970239 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.970253 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:17 crc kubenswrapper[4842]: I1111 13:31:17.970262 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:17Z","lastTransitionTime":"2025-11-11T13:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.058648 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.058656 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.058683 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:18 crc kubenswrapper[4842]: E1111 13:31:18.058879 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:18 crc kubenswrapper[4842]: E1111 13:31:18.059141 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:18 crc kubenswrapper[4842]: E1111 13:31:18.059285 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.072183 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.072219 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.072231 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.072246 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.072257 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.174799 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.174901 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.174927 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.174959 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.174982 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.277876 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.277922 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.277935 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.277952 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.277969 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.380955 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.381008 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.381021 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.381039 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.381053 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.483885 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.483928 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.483938 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.483953 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.483963 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.586641 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.586719 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.586741 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.586779 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.586797 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.689820 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.689898 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.689916 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.689942 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.689961 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.793360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.793473 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.793509 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.793542 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.793563 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.896187 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.896223 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.896237 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.896252 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.896260 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.999260 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.999306 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.999316 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.999330 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:18 crc kubenswrapper[4842]: I1111 13:31:18.999344 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:18Z","lastTransitionTime":"2025-11-11T13:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.058389 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:19 crc kubenswrapper[4842]: E1111 13:31:19.058567 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.101727 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.101786 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.101797 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.101815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.101827 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.204433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.204494 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.204519 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.204545 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.204565 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.307733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.307775 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.307786 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.307803 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.307813 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.410648 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.410714 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.410723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.410736 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.410744 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.513475 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.513513 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.513521 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.513538 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.513548 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.616688 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.616750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.616773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.616801 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.616821 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.720270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.720312 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.720326 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.720340 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.720351 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.822675 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.822724 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.822738 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.822755 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.822769 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.925971 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.926040 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.926066 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.926096 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:19 crc kubenswrapper[4842]: I1111 13:31:19.926152 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:19Z","lastTransitionTime":"2025-11-11T13:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.029326 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.029375 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.029386 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.029405 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.029418 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.058062 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:20 crc kubenswrapper[4842]: E1111 13:31:20.058491 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.058627 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.058741 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:20 crc kubenswrapper[4842]: E1111 13:31:20.058965 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:20 crc kubenswrapper[4842]: E1111 13:31:20.059148 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.072747 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.081812 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.094868 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.108155 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 
crc kubenswrapper[4842]: I1111 13:31:20.122613 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.131713 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.131756 4842 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.131767 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.131784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.131795 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.136849 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.147218 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.157306 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.166898 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.187213 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:08Z\\\",\\\"message\\\":\\\"SNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.139\\\\\\\", Port:17698, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1111 13:31:08.047623 6918 services_controller.go:452] Built service openshift-ingress-canary/ingress-canary per-node LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047636 6918 services_controller.go:453] Built service openshift-ingress-canary/ingress-canary template LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047616 6918 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1111 13:31:08.047202 6918 
services_controller.go:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:31:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.196795 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.205177 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.214793 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.225460 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.234050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.234122 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.234140 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.234161 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.234176 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.236470 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.248858 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.259725 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.273037 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.283173 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:20Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.336130 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.336168 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.336177 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.336193 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.336203 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.437885 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.437918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.437927 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.437940 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.437949 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.540023 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.540067 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.540076 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.540090 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.540131 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.642522 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.642581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.642598 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.642622 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.642638 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.745727 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.745781 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.745799 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.745822 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.745838 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.847612 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.847654 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.847665 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.847680 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.847689 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.950201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.950242 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.950250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.950263 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:20 crc kubenswrapper[4842]: I1111 13:31:20.950273 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:20Z","lastTransitionTime":"2025-11-11T13:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.052854 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.052904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.052913 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.052926 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.052934 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.058206 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:21 crc kubenswrapper[4842]: E1111 13:31:21.058302 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.155396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.155430 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.155438 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.155452 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.155482 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.260953 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.260986 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.260995 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.261012 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.261044 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.363073 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.363122 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.363133 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.363147 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.363156 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.466184 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.466638 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.466666 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.466696 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.466724 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.569117 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.569705 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.569717 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.569731 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.569739 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.671950 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.671981 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.671990 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.672018 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.672027 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.774798 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.774825 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.774850 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.774864 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.774872 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.878131 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.878169 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.878180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.878194 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.878204 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.980726 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.980779 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.980790 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.980807 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:21 crc kubenswrapper[4842]: I1111 13:31:21.980817 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:21Z","lastTransitionTime":"2025-11-11T13:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.059200 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.059322 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:22 crc kubenswrapper[4842]: E1111 13:31:22.059348 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.059328 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:22 crc kubenswrapper[4842]: E1111 13:31:22.059542 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:22 crc kubenswrapper[4842]: E1111 13:31:22.059754 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.083051 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.083125 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.083141 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.083160 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.083173 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.185404 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.185444 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.185456 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.185474 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.185488 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.289162 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.289232 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.289250 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.289279 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.289297 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.391732 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.391771 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.391780 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.391794 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.391804 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.494627 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.494663 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.494672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.494690 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.494700 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.598189 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.598255 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.598270 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.598294 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.598310 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.702029 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.702114 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.702128 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.702154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.702170 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.805576 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.805640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.805652 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.805672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.805683 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.909527 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.909615 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.909636 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.909669 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:22 crc kubenswrapper[4842]: I1111 13:31:22.909693 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:22Z","lastTransitionTime":"2025-11-11T13:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.013180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.013265 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.013286 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.013319 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.013344 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.058545 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:23 crc kubenswrapper[4842]: E1111 13:31:23.058773 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.060415 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:31:23 crc kubenswrapper[4842]: E1111 13:31:23.060735 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.116737 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.116830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.116847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.116874 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.116892 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.220506 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.220576 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.220592 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.220620 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.220642 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.323896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.324416 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.324605 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.324754 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.324885 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.429246 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.429292 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.429303 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.429323 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.429336 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.532845 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.532934 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.532957 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.532991 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.533015 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.636590 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.636679 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.636699 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.636727 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.636750 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.739066 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.739136 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.739155 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.739173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.739184 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.842760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.842834 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.842847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.842866 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.842880 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.946543 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.946586 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.946597 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.946614 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:23 crc kubenswrapper[4842]: I1111 13:31:23.946625 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:23Z","lastTransitionTime":"2025-11-11T13:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.049693 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.049760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.049776 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.049801 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.049819 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.059272 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.059278 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:24 crc kubenswrapper[4842]: E1111 13:31:24.059472 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.059307 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:24 crc kubenswrapper[4842]: E1111 13:31:24.059554 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:24 crc kubenswrapper[4842]: E1111 13:31:24.059733 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.152701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.152758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.152772 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.152793 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.152809 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.256993 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.257065 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.257087 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.257154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.257182 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.360738 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.360777 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.360786 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.360815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.360827 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.463792 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.463834 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.463844 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.463860 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.463870 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.567411 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.567477 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.567500 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.567526 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.567546 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.674558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.674600 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.674610 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.674625 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.674634 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.777910 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.777985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.777998 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.778024 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.778046 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.880559 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.880633 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.880653 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.880676 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.880694 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.983584 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.983643 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.983653 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.983675 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:24 crc kubenswrapper[4842]: I1111 13:31:24.983689 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:24Z","lastTransitionTime":"2025-11-11T13:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.058465 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:25 crc kubenswrapper[4842]: E1111 13:31:25.058587 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.086642 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.086694 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.086704 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.086723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.086737 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.189474 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.189552 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.189575 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.189612 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.189639 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.292360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.292420 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.292434 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.292461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.292483 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.395588 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.395651 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.395663 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.395690 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.395715 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.498050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.498136 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.498172 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.498197 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.498214 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.600007 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.600044 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.600056 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.600074 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.600083 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.702709 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.702755 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.702767 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.702784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.702795 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.805447 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.805495 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.805504 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.805517 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.805526 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.908362 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.908442 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.908466 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.908497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:25 crc kubenswrapper[4842]: I1111 13:31:25.908516 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:25Z","lastTransitionTime":"2025-11-11T13:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.010608 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.010655 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.010671 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.010691 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.010706 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.058461 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:26 crc kubenswrapper[4842]: E1111 13:31:26.058677 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.058516 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.058502 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:26 crc kubenswrapper[4842]: E1111 13:31:26.058770 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:26 crc kubenswrapper[4842]: E1111 13:31:26.058917 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.112312 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.112354 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.112366 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.112384 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.112393 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.214887 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.214924 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.214933 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.214946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.214954 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.317581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.317638 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.317657 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.317760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.317785 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.420629 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.420683 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.420705 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.420725 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.420740 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.523708 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.523747 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.523757 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.523772 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.523782 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.626572 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.626628 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.626650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.626670 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.626683 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.728707 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.728745 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.728753 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.728766 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.728774 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.831149 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.831216 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.831232 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.831252 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.831266 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.933341 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.933381 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.933391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.933406 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:26 crc kubenswrapper[4842]: I1111 13:31:26.933417 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:26Z","lastTransitionTime":"2025-11-11T13:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.035854 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.035948 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.035963 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.035986 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.036003 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.059135 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.059309 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.139081 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.139164 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.139176 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.139200 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.139212 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.242461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.242519 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.242532 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.242551 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.242564 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.345563 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.345605 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.345616 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.345632 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.345644 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.450212 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.450273 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.450287 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.450308 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.450321 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.554333 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.554413 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.554433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.554464 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.554484 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.660657 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.660729 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.660747 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.660773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.660789 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.759838 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.759904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.759919 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.759943 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.759958 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.774527 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:27Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.780139 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.780197 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.780212 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.780240 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.780256 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.797897 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:27Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.803440 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.803499 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.803509 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.803528 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.803540 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.822771 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:27Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.827146 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.827209 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.827224 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.827244 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.827257 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.830628 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.830882 4842 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.831036 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs podName:6b899889-1664-4e26-9cc9-0667626ac715 nodeName:}" failed. No retries permitted until 2025-11-11 13:32:31.831002526 +0000 UTC m=+162.491292275 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs") pod "network-metrics-daemon-hbtjv" (UID: "6b899889-1664-4e26-9cc9-0667626ac715") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.843068 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:27Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.847399 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.847439 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.847456 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.847475 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.847488 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.864369 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:27Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:27 crc kubenswrapper[4842]: E1111 13:31:27.864527 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.867020 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.867059 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.867072 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.867092 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.867128 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.970741 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.970826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.970836 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.970855 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:27 crc kubenswrapper[4842]: I1111 13:31:27.970865 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:27Z","lastTransitionTime":"2025-11-11T13:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.058771 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.058771 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.059075 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:28 crc kubenswrapper[4842]: E1111 13:31:28.059173 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:28 crc kubenswrapper[4842]: E1111 13:31:28.059535 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:28 crc kubenswrapper[4842]: E1111 13:31:28.059731 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.074560 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.074627 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.074649 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.074680 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.074700 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.183354 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.183409 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.183422 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.183444 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.183462 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.287320 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.287368 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.287380 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.287400 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.287413 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.389686 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.389743 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.389762 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.389784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.389801 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.492391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.492468 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.492484 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.492508 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.492521 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.595396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.595440 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.595449 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.595465 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.595475 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.698328 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.698402 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.698426 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.698458 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.698480 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.802296 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.802376 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.802392 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.802412 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.802429 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.905773 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.905859 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.905873 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.905897 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:28 crc kubenswrapper[4842]: I1111 13:31:28.905913 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:28Z","lastTransitionTime":"2025-11-11T13:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.009072 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.009134 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.009145 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.009180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.009193 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.058805 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:29 crc kubenswrapper[4842]: E1111 13:31:29.058969 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.112319 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.112379 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.112399 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.112425 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.112441 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.215931 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.215985 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.215996 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.216019 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.216037 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.318541 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.318583 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.318593 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.318615 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.318634 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.420977 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.421050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.421068 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.421084 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.421113 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.524093 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.524195 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.524209 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.524236 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.524251 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.626449 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.626488 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.626499 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.626516 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.626528 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.729828 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.729883 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.729896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.729917 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.729933 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.833201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.833244 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.833260 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.833279 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.833296 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.936208 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.936282 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.936302 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.936332 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:29 crc kubenswrapper[4842]: I1111 13:31:29.936353 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:29Z","lastTransitionTime":"2025-11-11T13:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.040480 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.040541 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.040568 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.040600 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.040623 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.058278 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.058349 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:30 crc kubenswrapper[4842]: E1111 13:31:30.058651 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.058781 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:30 crc kubenswrapper[4842]: E1111 13:31:30.059000 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:30 crc kubenswrapper[4842]: E1111 13:31:30.059253 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.075277 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d695c73980ff01367b8c736eeb8845dd3b6446fe3f875f74f75d2df8877047d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.092565 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.107340 4842 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.128033 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f384f662-ed07-40ca-835e-5e6caffe3152\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e9efb99ceaa1f30282a4ba1775a0c87b1654faa8125328c31bc2770e18c2f6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8d3a49ab3c76a094f161a27d5bac85b2dfc0352f6e640d3c0ea0c2032590e00\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://923313bf8b363782fca35c95b09c98973543a431a6b1a5ff5a416f89c15e35a9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef78ca36966c726e7d04049f5200025c69219b156343d92eb58b0c948dc938ec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83f0c8e2b83afbbc38bf642fdbb7442fbc014895e4f90bd53169855e6415a1ec\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"file observer\\\\nW1111 13:30:09.777246 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1111 13:30:09.777742 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1111 13:30:09.778745 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-786382389/tls.crt::/tmp/serving-cert-786382389/tls.key\\\\\\\"\\\\nI1111 13:30:10.127746 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1111 13:30:10.132351 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1111 13:30:10.132387 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1111 13:30:10.132422 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1111 13:30:10.132429 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1111 13:30:10.139795 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1111 13:30:10.139821 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139827 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1111 13:30:10.139832 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1111 13:30:10.139835 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1111 13:30:10.139838 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1111 13:30:10.139841 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1111 13:30:10.139857 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1111 13:30:10.142554 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:04Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://23d78faf0bb01df241d880dde0549ee01115bde0b2d32ea9adf0e141993f2d58\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc95af28c4e0b96d92de27fa7a6ad441b38e8b3ee8c96a633ae41d0b241ffb7b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.153233 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.153295 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.153310 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.153338 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.153363 4842 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.169369 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"190938a6-8514-40c9-adb9-ff237ade4e93\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1ef5a51e214e7406b2df07e81ae7d5bc67423a688dde8d5f31d0989666f22e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0c2d256f74850157cffb099b2951683104071aa981401488a444f31c56c15e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4385ce121569d1ddc2ed13e5c03a7fe0de6d10d201a83d7b657177a2182da3bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cad40397fe75a76e1973488cc3a7af029962acefecfbd867d4fce7e5310219a6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.203024 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"806b4bc1-fba9-419a-a963-989448e56a6c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://088a6b389b0e139f6bc54f1e4d9cdf3bc9f539f55ddc055e034c7eb6f02e7398\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://963c0a3e482d4b8cba9a61f052a6527283596313f6daf0ca8de563368f0fc1ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76a9779f402c0a328fdbab4f2d0bda1c3a7f3b95add2e0ff108c0c0ff8ece44b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffba1449f76f1a62b603e5462c3a4e3eaba2edc
0657b36abce6620d062d8fbeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d7e8c594ea00e9f0e51639f5097b0e13235fdb16511541b5b08b9791624fe9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ce9020ffee58dc5fb5c638ac437ed5af506e3e39ffd57dae97b1845f0aca703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ce9020ffee58dc5fb5c638ac437ed5af506e3e39ffd57dae97b1845f0aca703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef23368107965021b2ba73c31caab0e6fd77f2c16fd4574f80c4f5bc2c4a2e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef23368107965021b2ba73c31caab0e6fd77f2c16fd4574f80c4f5bc2c4a2e2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://eac2e8d4d90c6372e5905dd67441e7a8d78a142497c877025b35b44183cd2c71\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eac2e8d4d90c6372e5905dd67441e7a8d78a142497c877025b35b44183cd2c71\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.219631 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.232798 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.247031 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aa4a04f-c3e1-4a93-a07a-07ba502d737f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a8179f4d0844ea2aa814461e209edb15f11679050ff40651e1903d68e432daa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb77a670ecc0da07641e913bf61a9e61c0dc106ecfd263c7d23365728a991bf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f2a7c82c4733b38c245ad73d8fc6145d3d433a18f70589dd1ffef18428373de0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12d7deb581c31267475f665c247e8c6479b782f654930c565db23848ad043498\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3f929cf40623e74536d6b108bd20fc238d0217753ea75167672eefe945feaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5060de5c804f4be19f612a778ee6c6b36cc2dc118e808004c415f803cebef5b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f39f06d329201a8a833f48bcee7a22fca5d528abf718a8a51d748690bc5a0bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7wztg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mmt6t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.256224 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.256283 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.256297 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.256324 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.256337 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.259899 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c48d9130-053f-49a1-b719-2cf8535aee1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa13da6dc25daca3b75a422e243b23b850420231b0530d92b65396e0287f5b68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27bab6481a7e8b295e460a814a5b32206f27d3b07e1c03c599533ae9e92114ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ztcsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-r96nt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.273351 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a77ab7fb-38f4-4094-aef9-d563935ddfa0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00e8f727aa7ac6f81e8047934fa7d73d2b3f0e6020bfb977c672d4bc5d23ae06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5a0382b8930573cd7ed4989b85526721dae035b315df9d1d6bad5e024e18016\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://864f34d8571c2515e92c5ebf6294147e64a306ae734137a81a53e935df9de84c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd84603130762f43480ab6b95e960f045e0f52eca96c4cc00be6cadf0298b05\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.285904 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.299899 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a8a355aa743ca7dd902c8061684a926c83b0005e894a09e145be8861734de79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.315380 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0063669c0a94757632c5baa3dd03a81ddc9670645622258067dc8f6707c61349\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04a91ff8fdd2b2d1ebcc0dc77161413f5619808bfc76ff3d9868178fc3579793\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.329853 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-mggn5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a899ee4d-e1d3-44cc-a780-2dac60da21eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:00Z\\\",\\\"message\\\":\\\"2025-11-11T13:30:15+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d\\\\n2025-11-11T13:30:15+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_98fb1556-e348-47ca-9a02-6f194af9c87d to /host/opt/cni/bin/\\\\n2025-11-11T13:30:15Z [verbose] multus-daemon started\\\\n2025-11-11T13:30:15Z [verbose] Readiness Indicator file check\\\\n2025-11-11T13:31:00Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:31:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6dph9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mggn5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.349362 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.359364 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.359423 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.359434 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.359455 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.359470 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.365118 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.381995 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.405758 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:08Z\\\",\\\"message\\\":\\\"SNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.139\\\\\\\", Port:17698, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1111 13:31:08.047623 6918 services_controller.go:452] Built service openshift-ingress-canary/ingress-canary per-node LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047636 6918 services_controller.go:453] Built service openshift-ingress-canary/ingress-canary template LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047616 6918 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1111 13:31:08.047202 6918 services_controller.go:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:31:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:30Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.461701 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.461766 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.461784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.461803 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.461814 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.564788 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.564839 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.564850 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.564865 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.564874 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.668486 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.668577 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.668588 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.668608 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.668620 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.770607 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.770645 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.770654 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.770670 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.770679 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.873780 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.873828 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.873839 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.873858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.873868 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.976575 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.976660 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.976685 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.976719 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:30 crc kubenswrapper[4842]: I1111 13:31:30.976744 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:30Z","lastTransitionTime":"2025-11-11T13:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.058043 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:31 crc kubenswrapper[4842]: E1111 13:31:31.058234 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.079302 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.079345 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.079357 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.079373 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.079384 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.182589 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.182644 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.182658 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.182679 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.182695 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.285893 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.285953 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.285966 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.285990 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.286005 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.388786 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.388826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.388842 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.388863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.388876 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.490945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.490989 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.491001 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.491017 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.491028 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.593995 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.594034 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.594043 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.594057 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.594070 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.695955 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.695988 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.695997 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.696009 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.696018 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.798433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.798465 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.798476 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.798490 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.798500 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.900847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.900899 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.900909 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.900925 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:31 crc kubenswrapper[4842]: I1111 13:31:31.900936 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:31Z","lastTransitionTime":"2025-11-11T13:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.003030 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.003132 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.003151 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.003176 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.003194 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.059060 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.059185 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.059070 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:32 crc kubenswrapper[4842]: E1111 13:31:32.059331 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:32 crc kubenswrapper[4842]: E1111 13:31:32.059420 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:32 crc kubenswrapper[4842]: E1111 13:31:32.059499 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.105420 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.105817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.105835 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.105856 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.105870 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.208534 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.208582 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.208593 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.208608 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.208619 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.310412 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.310446 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.310456 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.310469 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.310479 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.412512 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.412552 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.412563 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.412582 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.412596 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.515255 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.515303 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.515317 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.515334 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.515346 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.617824 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.617869 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.617880 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.617893 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.617902 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.720032 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.720063 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.720078 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.720111 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.720122 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.822408 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.822442 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.822455 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.822468 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.822480 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.924652 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.924684 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.924694 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.924708 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:32 crc kubenswrapper[4842]: I1111 13:31:32.924716 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:32Z","lastTransitionTime":"2025-11-11T13:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.026811 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.026861 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.026872 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.026886 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.026897 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.058732 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:33 crc kubenswrapper[4842]: E1111 13:31:33.058949 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.129203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.129233 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.129244 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.129262 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.129274 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.232565 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.232608 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.232624 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.232640 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.232652 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.334867 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.334895 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.334905 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.334919 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.334927 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.437160 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.437196 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.437204 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.437217 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.437226 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.539006 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.539067 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.539078 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.539094 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.539139 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.641844 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.641894 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.641904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.641917 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.641924 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.744330 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.744378 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.744407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.744422 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.744433 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.847400 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.847491 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.847526 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.847556 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.847572 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.950908 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.950976 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.950990 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.951014 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:33 crc kubenswrapper[4842]: I1111 13:31:33.951030 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:33Z","lastTransitionTime":"2025-11-11T13:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.053720 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.053760 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.053770 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.053784 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.053793 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.059017 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.059064 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:34 crc kubenswrapper[4842]: E1111 13:31:34.059127 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.059024 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:34 crc kubenswrapper[4842]: E1111 13:31:34.059402 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:34 crc kubenswrapper[4842]: E1111 13:31:34.062824 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.156842 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.156926 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.156958 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.156988 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.157010 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.260271 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.260317 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.260328 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.260344 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.260356 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.362459 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.362492 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.362500 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.362512 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.362522 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.466091 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.466185 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.466203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.466228 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.466246 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.569053 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.569130 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.569142 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.569159 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.569173 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.672262 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.672324 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.672339 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.672368 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.672384 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.775156 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.775193 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.775206 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.775225 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.775239 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.878881 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.878918 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.878927 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.878943 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.878958 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.981478 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.981536 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.981547 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.981566 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:34 crc kubenswrapper[4842]: I1111 13:31:34.981582 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:34Z","lastTransitionTime":"2025-11-11T13:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.058694 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:35 crc kubenswrapper[4842]: E1111 13:31:35.058830 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.060216 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:31:35 crc kubenswrapper[4842]: E1111 13:31:35.060558 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\"" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.084199 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.084235 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.084253 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.084275 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.084287 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.187272 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.187313 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.187322 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.187337 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.187346 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.290296 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.290397 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.290422 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.290453 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.290476 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.393746 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.393815 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.393843 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.393868 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.393883 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.497249 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.497300 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.497312 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.497330 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.497343 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.600585 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.600626 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.600635 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.600650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.600660 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.705067 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.705143 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.705154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.705175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.705186 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.808763 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.808823 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.808837 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.808858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.808875 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.911420 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.911483 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.911502 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.911526 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:35 crc kubenswrapper[4842]: I1111 13:31:35.911542 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:35Z","lastTransitionTime":"2025-11-11T13:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.014156 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.014201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.014217 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.014235 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.014246 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.058914 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.058976 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:36 crc kubenswrapper[4842]: E1111 13:31:36.059049 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.059067 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:36 crc kubenswrapper[4842]: E1111 13:31:36.059182 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:36 crc kubenswrapper[4842]: E1111 13:31:36.059322 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.117000 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.117038 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.117050 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.117066 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.117078 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.220206 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.220289 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.220298 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.220331 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.220359 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.323447 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.323505 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.323517 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.323535 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.323549 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.426021 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.426084 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.426132 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.426152 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.426162 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.529275 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.529307 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.529316 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.529328 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.529341 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.632212 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.632292 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.632304 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.632327 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.632336 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.735780 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.736164 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.736251 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.736337 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.736421 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.840329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.840389 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.840404 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.840422 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.840434 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.943535 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.943604 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.943622 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.943645 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:36 crc kubenswrapper[4842]: I1111 13:31:36.943657 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:36Z","lastTransitionTime":"2025-11-11T13:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.046581 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.046628 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.046645 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.046740 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.046757 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.059123 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:37 crc kubenswrapper[4842]: E1111 13:31:37.059324 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.149659 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.149708 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.149726 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.149744 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.149756 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.252508 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.252569 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.252582 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.252601 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.252611 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.355251 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.355295 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.355306 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.355324 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.355335 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.457873 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.457917 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.457965 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.457982 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.457994 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.561017 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.561067 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.561079 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.561096 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.561126 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.663203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.663262 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.663287 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.663308 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.663323 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.765465 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.765514 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.765543 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.765560 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.765570 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.867870 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.867928 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.867938 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.867961 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.867972 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.970854 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.970898 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.970906 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.970922 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:37 crc kubenswrapper[4842]: I1111 13:31:37.970931 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:37Z","lastTransitionTime":"2025-11-11T13:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.058158 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.058228 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.058324 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.058179 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.058465 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.058552 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.074653 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.074713 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.074728 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.074749 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.074760 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.177757 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.178066 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.178081 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.178133 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.178154 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.227391 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.227450 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.227462 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.227481 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.227491 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.239609 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.245065 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.245131 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.245149 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.245183 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.245200 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.265264 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.270465 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.270520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.270531 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.270551 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.270564 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.284432 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.289462 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.289506 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.289518 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.289537 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.289551 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.302437 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.307516 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.307557 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.307567 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.307584 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.307596 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.320732 4842 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-11T13:31:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9ac0780f-86e7-49c9-975f-c53d42cb190a\\\",\\\"systemUUID\\\":\\\"8b08dd54-690d-45ec-9438-693834fc9d7e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:38Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:38 crc kubenswrapper[4842]: E1111 13:31:38.321048 4842 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.323348 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.323399 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.323416 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.323441 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.323457 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.426400 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.426439 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.426451 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.426467 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.426477 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.531276 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.531780 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.531831 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.531872 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.531895 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.634063 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.634153 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.634163 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.634178 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.634187 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.736252 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.736308 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.736319 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.736335 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.736348 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.838944 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.838997 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.839008 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.839025 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.839040 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.941081 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.941145 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.941153 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.941175 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:38 crc kubenswrapper[4842]: I1111 13:31:38.941183 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:38Z","lastTransitionTime":"2025-11-11T13:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.045247 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.045310 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.045329 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.045358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.045375 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.058815 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:39 crc kubenswrapper[4842]: E1111 13:31:39.058936 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.147912 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.147980 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.147993 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.148009 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.148021 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.250354 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.250395 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.250403 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.250417 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.250426 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.352328 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.352373 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.352385 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.352398 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.352407 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.454501 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.454559 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.454568 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.454585 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.454595 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.557081 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.557342 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.557357 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.557443 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.557455 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.660689 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.660798 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.660827 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.660870 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.660900 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.764524 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.764609 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.764634 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.764666 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.764689 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.867620 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.867714 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.867742 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.867781 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.867810 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.970758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.970858 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.970882 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.970911 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:39 crc kubenswrapper[4842]: I1111 13:31:39.970928 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:39Z","lastTransitionTime":"2025-11-11T13:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.059353 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.059390 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.059357 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:40 crc kubenswrapper[4842]: E1111 13:31:40.059532 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:40 crc kubenswrapper[4842]: E1111 13:31:40.059787 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:40 crc kubenswrapper[4842]: E1111 13:31:40.059944 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.073749 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30d2d925-0a6b-49aa-ac20-0741496ddf09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:29:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9413003586cc6d3cb2392647330f7682402095dfb535eb0062ebc95c451a2350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:29:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b28f0706ff69ddbf271a81d57dfb837a7fc48b0e6b6d1714f6f3e5ba5a17f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:29:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:29:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:29:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:40Z is after 2025-08-24T17:21:41Z" Nov 11 
13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.073849 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.073896 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.073906 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.073923 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.073936 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.085427 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-p8pll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2ec2204-2327-4d28-a8d3-b24380c1671c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5192b1aa4df24e2bef7fafbe144f11c7a87fe4f65df5bb83b4d5458791514818\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-np4mp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-p8pll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.098412 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-f5rhw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"123a19db-ff30-45bf-913c-61f72e10cadc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8b6da373a3d4112359445b9a17a0dfdc5894c67f3dfd3f9b881ac78bda1b3f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l9bq6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:09Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-f5rhw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.123863 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-11T13:31:08Z\\\",\\\"message\\\":\\\"SNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.139\\\\\\\", Port:17698, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1111 13:31:08.047623 6918 services_controller.go:452] Built service openshift-ingress-canary/ingress-canary per-node LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047636 6918 services_controller.go:453] Built service openshift-ingress-canary/ingress-canary template LB for network=default: []services.LB{}\\\\nI1111 13:31:08.047616 6918 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-scheduler-operator/metrics]} name:Service_openshift-kube-scheduler-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.233:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1dc899db-4498-4b7a-8437-861940b962e7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1111 13:31:08.047202 6918 services_controller.go:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-11T13:31:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-dzhjw_openshift-ovn-kubernetes(d8bdbe88-f5ed-4117-92ea-6e1f45f6b495)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-11T13:30:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nzc59\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dzhjw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.136492 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f3edace-782c-4646-8a57-d39d8373bb14\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90eb733e92c7fb96e641ac8a917a260ad242c74e7b90940502bd0fda048e538d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-11T13:30:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t72xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-k84vc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.149735 4842 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6b899889-1664-4e26-9cc9-0667626ac715\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-11T13:30:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ttbwz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-11T13:30:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-hbtjv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-11T13:31:40Z is after 2025-08-24T17:21:41Z" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.175734 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.175775 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.175826 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.175846 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.175919 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.178388 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.178366628 podStartE2EDuration="1m29.178366628s" podCreationTimestamp="2025-11-11 13:30:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:40.178324857 +0000 UTC m=+110.838614476" watchObservedRunningTime="2025-11-11 13:31:40.178366628 +0000 UTC m=+110.838656257" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.233142 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=57.23311918 podStartE2EDuration="57.23311918s" podCreationTimestamp="2025-11-11 13:30:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:40.20223503 +0000 UTC m=+110.862524659" watchObservedRunningTime="2025-11-11 13:31:40.23311918 +0000 UTC m=+110.893408799" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.233311 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=20.233306296 podStartE2EDuration="20.233306296s" podCreationTimestamp="2025-11-11 13:31:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:40.228868738 +0000 UTC m=+110.889158357" watchObservedRunningTime="2025-11-11 13:31:40.233306296 +0000 UTC m=+110.893595915" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.279134 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.279169 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.279180 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.279193 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.279204 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.310096 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-mmt6t" podStartSLOduration=90.310076543 podStartE2EDuration="1m30.310076543s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:40.295633324 +0000 UTC m=+110.955922993" watchObservedRunningTime="2025-11-11 13:31:40.310076543 +0000 UTC m=+110.970366162" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.324609 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-r96nt" podStartSLOduration=90.324588074 podStartE2EDuration="1m30.324588074s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:40.310663081 +0000 UTC m=+110.970952700" watchObservedRunningTime="2025-11-11 13:31:40.324588074 +0000 UTC m=+110.984877693" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.338190 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=91.338169476 podStartE2EDuration="1m31.338169476s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:40.324550012 +0000 UTC m=+110.984839651" watchObservedRunningTime="2025-11-11 13:31:40.338169476 +0000 UTC m=+110.998459095" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.382004 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.382060 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.382072 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.382128 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.382153 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.383950 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-mggn5" podStartSLOduration=90.383927149 podStartE2EDuration="1m30.383927149s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:40.38334016 +0000 UTC m=+111.043629779" watchObservedRunningTime="2025-11-11 13:31:40.383927149 +0000 UTC m=+111.044216778" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.485489 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.485553 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.485572 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.485597 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.485618 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.589469 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.589523 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.589537 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.589558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.589572 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.694926 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.695360 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.695505 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.695591 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.695677 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.798979 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.799044 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.799059 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.799080 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.799114 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.901283 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.901568 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.901631 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.901698 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:40 crc kubenswrapper[4842]: I1111 13:31:40.901783 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:40Z","lastTransitionTime":"2025-11-11T13:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.004867 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.004938 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.004953 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.004975 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.004989 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.058906 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:41 crc kubenswrapper[4842]: E1111 13:31:41.059044 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.107870 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.107945 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.107958 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.107982 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.107993 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.211215 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.211298 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.211321 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.211357 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.211380 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.314466 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.314520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.314531 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.314545 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.314555 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.416647 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.416685 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.416694 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.416706 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.416716 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.519252 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.519308 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.519320 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.519338 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.519353 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.621560 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.621609 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.621617 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.621629 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.621639 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.724383 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.724435 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.724447 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.724494 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.724506 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.827674 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.827750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.827758 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.827772 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.827781 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.930069 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.930127 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.930137 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.930149 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:41 crc kubenswrapper[4842]: I1111 13:31:41.930159 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:41Z","lastTransitionTime":"2025-11-11T13:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.033267 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.033319 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.033333 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.033355 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.033369 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.058773 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.058823 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.058834 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:42 crc kubenswrapper[4842]: E1111 13:31:42.058940 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:42 crc kubenswrapper[4842]: E1111 13:31:42.059129 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:42 crc kubenswrapper[4842]: E1111 13:31:42.059246 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.136824 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.136853 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.136862 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.136875 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.136884 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.239227 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.239272 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.239284 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.239302 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.239315 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.342211 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.342251 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.342262 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.342280 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.342293 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.445734 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.445797 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.445813 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.445837 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.445853 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.549056 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.549086 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.549115 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.549128 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.549137 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.651615 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.651650 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.651658 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.651670 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.651679 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.756358 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.756497 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.756511 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.756540 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.756555 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.859989 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.860080 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.860115 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.860135 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.860148 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.963067 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.963162 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.963181 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.963206 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:42 crc kubenswrapper[4842]: I1111 13:31:42.963224 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:42Z","lastTransitionTime":"2025-11-11T13:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.058022 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:43 crc kubenswrapper[4842]: E1111 13:31:43.058193 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.065789 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.065832 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.065848 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.065865 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.065876 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.168847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.168904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.168914 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.168933 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.168947 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.272568 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.272643 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.272658 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.272678 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.272692 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.375310 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.375369 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.375388 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.375410 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.375426 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.479156 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.479200 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.479219 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.479244 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.479258 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.582584 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.582681 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.582697 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.582791 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.582818 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.687910 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.687977 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.687993 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.688018 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.688032 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.790886 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.790933 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.790946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.790963 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.790975 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.894433 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.894503 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.894516 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.894538 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.894554 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.997982 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.998033 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.998049 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.998069 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:43 crc kubenswrapper[4842]: I1111 13:31:43.998082 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:43Z","lastTransitionTime":"2025-11-11T13:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.058134 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.058337 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.058407 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:44 crc kubenswrapper[4842]: E1111 13:31:44.058535 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:44 crc kubenswrapper[4842]: E1111 13:31:44.058692 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:44 crc kubenswrapper[4842]: E1111 13:31:44.058912 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.102289 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.102355 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.102378 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.102408 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.102430 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.205334 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.205394 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.205407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.205431 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.205447 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.308863 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.308900 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.308911 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.308929 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.308940 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.411578 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.411632 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.411641 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.411659 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.411671 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.514295 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.514342 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.514355 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.514373 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.514386 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.616854 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.616909 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.616928 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.616946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.616958 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.720073 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.720173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.720187 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.720202 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.720213 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.822767 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.822847 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.822869 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.822905 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.822933 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.926303 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.926353 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.926364 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.926385 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:44 crc kubenswrapper[4842]: I1111 13:31:44.926398 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:44Z","lastTransitionTime":"2025-11-11T13:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.029946 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.029995 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.030005 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.030026 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.030039 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.058650 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:45 crc kubenswrapper[4842]: E1111 13:31:45.058854 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.131834 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.131878 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.131888 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.131904 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.131914 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.234937 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.234992 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.235003 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.235024 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.235037 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.337399 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.337448 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.337459 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.337476 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.337489 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.441210 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.441277 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.441287 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.441302 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.441311 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.543983 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.544037 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.544047 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.544063 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.544075 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.646828 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.646879 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.646892 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.646908 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.646920 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.749461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.749508 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.749520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.749538 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.749549 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.851856 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.851964 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.851973 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.851988 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.851999 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.954357 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.954396 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.954405 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.954418 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:45 crc kubenswrapper[4842]: I1111 13:31:45.954428 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:45Z","lastTransitionTime":"2025-11-11T13:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.056672 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.056720 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.056731 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.056750 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.056762 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.058968 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.059018 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:46 crc kubenswrapper[4842]: E1111 13:31:46.059095 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.059133 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:46 crc kubenswrapper[4842]: E1111 13:31:46.059227 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:46 crc kubenswrapper[4842]: E1111 13:31:46.059311 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.158880 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.158913 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.158929 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.158968 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.158979 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.261395 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.261436 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.261447 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.261465 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.261478 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.364161 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.364203 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.364213 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.364232 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.364244 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.466817 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.466856 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.466865 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.466880 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.466891 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.570065 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.570125 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.570136 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.570153 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.570165 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.673035 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.673095 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.673148 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.673173 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.673188 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.775529 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.775591 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.775605 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.775620 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.775650 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.878622 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.878675 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.878687 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.878706 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.878719 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.980684 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.980812 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.980825 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.980838 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:46 crc kubenswrapper[4842]: I1111 13:31:46.980848 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:46Z","lastTransitionTime":"2025-11-11T13:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.058446 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:47 crc kubenswrapper[4842]: E1111 13:31:47.058607 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.083685 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.083723 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.083733 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.083762 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.083780 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.186218 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.186273 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.186282 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.186301 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.186312 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.288185 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.288264 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.288279 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.288333 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.288352 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.390812 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.390867 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.390883 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.390905 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.390919 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.493065 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.493147 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.493157 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.493171 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.493182 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.596145 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.596201 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.596212 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.596230 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.596240 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.699380 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.699470 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.699484 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.699502 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.699520 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.748484 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/1.log" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.749288 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/0.log" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.749368 4842 generic.go:334] "Generic (PLEG): container finished" podID="a899ee4d-e1d3-44cc-a780-2dac60da21eb" containerID="969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067" exitCode=1 Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.749426 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerDied","Data":"969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.749508 4842 scope.go:117] "RemoveContainer" containerID="2758f3720d7360c3dbe5b43cd67164ed18f0a6c192fb5636b94afcf81c339f4c" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.750041 4842 scope.go:117] "RemoveContainer" containerID="969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067" Nov 11 13:31:47 crc kubenswrapper[4842]: E1111 13:31:47.750270 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-mggn5_openshift-multus(a899ee4d-e1d3-44cc-a780-2dac60da21eb)\"" pod="openshift-multus/multus-mggn5" podUID="a899ee4d-e1d3-44cc-a780-2dac60da21eb" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.774963 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podStartSLOduration=98.774930173 podStartE2EDuration="1m38.774930173s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:47.772642932 +0000 UTC m=+118.432932551" watchObservedRunningTime="2025-11-11 13:31:47.774930173 +0000 UTC m=+118.435219802" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.801925 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.801964 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.801974 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.801991 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.802000 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.820313 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=52.820292154 podStartE2EDuration="52.820292154s" podCreationTimestamp="2025-11-11 13:30:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:47.820222382 +0000 UTC m=+118.480512001" watchObservedRunningTime="2025-11-11 13:31:47.820292154 +0000 UTC m=+118.480581793" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.845525 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-p8pll" podStartSLOduration=98.845485327 podStartE2EDuration="1m38.845485327s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:47.831711359 +0000 UTC m=+118.492000998" watchObservedRunningTime="2025-11-11 13:31:47.845485327 +0000 UTC m=+118.505774966" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.845923 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-f5rhw" podStartSLOduration=98.845917401 podStartE2EDuration="1m38.845917401s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:47.845283811 +0000 UTC m=+118.505573450" watchObservedRunningTime="2025-11-11 13:31:47.845917401 +0000 UTC m=+118.506207030" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.904558 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.904800 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.904814 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.904830 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:47 crc kubenswrapper[4842]: I1111 13:31:47.904840 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:47Z","lastTransitionTime":"2025-11-11T13:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.007152 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.007688 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.007767 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.007835 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.007911 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:48Z","lastTransitionTime":"2025-11-11T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.058869 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.058964 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:48 crc kubenswrapper[4842]: E1111 13:31:48.059036 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.059338 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:48 crc kubenswrapper[4842]: E1111 13:31:48.059451 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:48 crc kubenswrapper[4842]: E1111 13:31:48.059578 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.109967 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.110013 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.110023 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.110036 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.110048 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:48Z","lastTransitionTime":"2025-11-11T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.212285 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.212327 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.212336 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.212350 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.212362 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:48Z","lastTransitionTime":"2025-11-11T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.315378 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.315429 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.315441 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.315462 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.315477 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:48Z","lastTransitionTime":"2025-11-11T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.418154 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.418424 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.418520 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.418608 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.418689 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:48Z","lastTransitionTime":"2025-11-11T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.521611 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.521659 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.521668 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.521688 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.521700 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:48Z","lastTransitionTime":"2025-11-11T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.572351 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.572407 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.572438 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.572461 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.572473 4842 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-11T13:31:48Z","lastTransitionTime":"2025-11-11T13:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.611478 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn"] Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.611858 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.613900 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.614059 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.614792 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.614889 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.753154 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/1.log" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.757981 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9117c85d-155f-4ca6-b235-d8dae018246f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.758216 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9117c85d-155f-4ca6-b235-d8dae018246f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 
13:31:48.758336 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9117c85d-155f-4ca6-b235-d8dae018246f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.758837 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9117c85d-155f-4ca6-b235-d8dae018246f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.758976 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9117c85d-155f-4ca6-b235-d8dae018246f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.860415 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9117c85d-155f-4ca6-b235-d8dae018246f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.860712 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9117c85d-155f-4ca6-b235-d8dae018246f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.860800 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9117c85d-155f-4ca6-b235-d8dae018246f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.860880 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9117c85d-155f-4ca6-b235-d8dae018246f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.860978 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9117c85d-155f-4ca6-b235-d8dae018246f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.861320 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/9117c85d-155f-4ca6-b235-d8dae018246f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.861360 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/9117c85d-155f-4ca6-b235-d8dae018246f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.862192 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9117c85d-155f-4ca6-b235-d8dae018246f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.867613 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9117c85d-155f-4ca6-b235-d8dae018246f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.882082 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9117c85d-155f-4ca6-b235-d8dae018246f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-2kczn\" (UID: \"9117c85d-155f-4ca6-b235-d8dae018246f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:48 crc kubenswrapper[4842]: I1111 13:31:48.925649 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.058431 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:49 crc kubenswrapper[4842]: E1111 13:31:49.058555 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.059165 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.758439 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/3.log" Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.760927 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerStarted","Data":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.761355 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.763146 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" event={"ID":"9117c85d-155f-4ca6-b235-d8dae018246f","Type":"ContainerStarted","Data":"06edd299142850657630844833d7b8c29c4e51ba8bab66af239c08402b6b77d3"} Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.763191 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" event={"ID":"9117c85d-155f-4ca6-b235-d8dae018246f","Type":"ContainerStarted","Data":"30b5a7f9e14537d127d1651f70a4dc095c2f6dee0fc6272141aa1c2e71b08e70"} Nov 11 13:31:49 crc kubenswrapper[4842]: I1111 13:31:49.808706 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podStartSLOduration=99.808677826 podStartE2EDuration="1m39.808677826s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:49.791057278 +0000 UTC m=+120.451346897" watchObservedRunningTime="2025-11-11 13:31:49.808677826 +0000 UTC m=+120.468967445" Nov 11 13:31:50 crc kubenswrapper[4842]: E1111 13:31:50.060227 4842 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 11 13:31:50 crc kubenswrapper[4842]: I1111 13:31:50.060500 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:50 crc kubenswrapper[4842]: I1111 13:31:50.060513 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:50 crc kubenswrapper[4842]: I1111 13:31:50.060582 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:50 crc kubenswrapper[4842]: E1111 13:31:50.060679 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:50 crc kubenswrapper[4842]: E1111 13:31:50.060855 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:50 crc kubenswrapper[4842]: E1111 13:31:50.060989 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:50 crc kubenswrapper[4842]: I1111 13:31:50.096777 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-2kczn" podStartSLOduration=100.096741542 podStartE2EDuration="1m40.096741542s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:31:49.809980986 +0000 UTC m=+120.470270605" watchObservedRunningTime="2025-11-11 13:31:50.096741542 +0000 UTC m=+120.757031161" Nov 11 13:31:50 crc kubenswrapper[4842]: I1111 13:31:50.097310 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-hbtjv"] Nov 11 13:31:50 crc kubenswrapper[4842]: E1111 13:31:50.176998 4842 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 11 13:31:50 crc kubenswrapper[4842]: I1111 13:31:50.767084 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:50 crc kubenswrapper[4842]: E1111 13:31:50.767299 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:51 crc kubenswrapper[4842]: I1111 13:31:51.058616 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:51 crc kubenswrapper[4842]: E1111 13:31:51.058753 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:52 crc kubenswrapper[4842]: I1111 13:31:52.058923 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:52 crc kubenswrapper[4842]: I1111 13:31:52.058995 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:52 crc kubenswrapper[4842]: I1111 13:31:52.059058 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:52 crc kubenswrapper[4842]: E1111 13:31:52.059121 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:52 crc kubenswrapper[4842]: E1111 13:31:52.059179 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:52 crc kubenswrapper[4842]: E1111 13:31:52.059248 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:53 crc kubenswrapper[4842]: I1111 13:31:53.057958 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:53 crc kubenswrapper[4842]: E1111 13:31:53.058181 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:54 crc kubenswrapper[4842]: I1111 13:31:54.058231 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:54 crc kubenswrapper[4842]: I1111 13:31:54.058285 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:54 crc kubenswrapper[4842]: I1111 13:31:54.058231 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:54 crc kubenswrapper[4842]: E1111 13:31:54.058384 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:54 crc kubenswrapper[4842]: E1111 13:31:54.058417 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:54 crc kubenswrapper[4842]: E1111 13:31:54.058478 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:55 crc kubenswrapper[4842]: I1111 13:31:55.059834 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:55 crc kubenswrapper[4842]: E1111 13:31:55.060224 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:55 crc kubenswrapper[4842]: E1111 13:31:55.178626 4842 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 11 13:31:56 crc kubenswrapper[4842]: I1111 13:31:56.058155 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:56 crc kubenswrapper[4842]: E1111 13:31:56.058295 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:56 crc kubenswrapper[4842]: I1111 13:31:56.058421 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:56 crc kubenswrapper[4842]: I1111 13:31:56.058482 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:56 crc kubenswrapper[4842]: E1111 13:31:56.058667 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:56 crc kubenswrapper[4842]: E1111 13:31:56.058852 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:57 crc kubenswrapper[4842]: I1111 13:31:57.059055 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:57 crc kubenswrapper[4842]: E1111 13:31:57.059811 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:31:58 crc kubenswrapper[4842]: I1111 13:31:58.058419 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:31:58 crc kubenswrapper[4842]: I1111 13:31:58.058589 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:31:58 crc kubenswrapper[4842]: I1111 13:31:58.058582 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:31:58 crc kubenswrapper[4842]: E1111 13:31:58.058707 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:31:58 crc kubenswrapper[4842]: E1111 13:31:58.058907 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:31:58 crc kubenswrapper[4842]: E1111 13:31:58.059010 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:31:59 crc kubenswrapper[4842]: I1111 13:31:59.058494 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:31:59 crc kubenswrapper[4842]: E1111 13:31:59.058673 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:32:00 crc kubenswrapper[4842]: I1111 13:32:00.059017 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:32:00 crc kubenswrapper[4842]: I1111 13:32:00.059139 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:32:00 crc kubenswrapper[4842]: I1111 13:32:00.059191 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:00 crc kubenswrapper[4842]: E1111 13:32:00.060918 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:32:00 crc kubenswrapper[4842]: E1111 13:32:00.061163 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:32:00 crc kubenswrapper[4842]: E1111 13:32:00.061311 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:32:00 crc kubenswrapper[4842]: E1111 13:32:00.179375 4842 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 11 13:32:01 crc kubenswrapper[4842]: I1111 13:32:01.058352 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:01 crc kubenswrapper[4842]: E1111 13:32:01.058798 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:32:01 crc kubenswrapper[4842]: I1111 13:32:01.059087 4842 scope.go:117] "RemoveContainer" containerID="969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067" Nov 11 13:32:01 crc kubenswrapper[4842]: I1111 13:32:01.808699 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/1.log" Nov 11 13:32:01 crc kubenswrapper[4842]: I1111 13:32:01.808782 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerStarted","Data":"3ca9f25d2b904da9dc8ce64fb2917f65c3fef7e8395347fbf0d1793e5f15643b"} Nov 11 13:32:02 crc kubenswrapper[4842]: I1111 13:32:02.058900 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:32:02 crc kubenswrapper[4842]: I1111 13:32:02.058921 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:02 crc kubenswrapper[4842]: I1111 13:32:02.058898 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:32:02 crc kubenswrapper[4842]: E1111 13:32:02.059044 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:32:02 crc kubenswrapper[4842]: E1111 13:32:02.059295 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:32:02 crc kubenswrapper[4842]: E1111 13:32:02.059345 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:32:03 crc kubenswrapper[4842]: I1111 13:32:03.058777 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:03 crc kubenswrapper[4842]: E1111 13:32:03.058912 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:32:04 crc kubenswrapper[4842]: I1111 13:32:04.058833 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:32:04 crc kubenswrapper[4842]: I1111 13:32:04.058924 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:04 crc kubenswrapper[4842]: E1111 13:32:04.058968 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-hbtjv" podUID="6b899889-1664-4e26-9cc9-0667626ac715" Nov 11 13:32:04 crc kubenswrapper[4842]: E1111 13:32:04.059156 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 11 13:32:04 crc kubenswrapper[4842]: I1111 13:32:04.059242 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:32:04 crc kubenswrapper[4842]: E1111 13:32:04.059459 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 11 13:32:05 crc kubenswrapper[4842]: I1111 13:32:05.058615 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:05 crc kubenswrapper[4842]: E1111 13:32:05.058860 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.059329 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.059401 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.059411 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.062343 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.062377 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.062428 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.062585 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.062867 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 11 13:32:06 crc kubenswrapper[4842]: I1111 13:32:06.063191 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 11 13:32:07 crc kubenswrapper[4842]: I1111 13:32:07.058192 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.753487 4842 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.808293 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.809055 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-jczw6"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.809313 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.809399 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.809437 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-97v95"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.810081 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.812016 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-tncv8"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.812582 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-tncv8" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.813459 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ppkl7"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.814185 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.817735 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.821784 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.822486 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.822768 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.824301 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.824623 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.830358 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.830439 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.830606 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.830705 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.830770 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.830858 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.830971 4842 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"default-dockercfg-chnjx" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.831059 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.831133 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.831212 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.832369 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.832643 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.832768 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.832848 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833070 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833203 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833280 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833369 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833442 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833452 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833518 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833620 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833639 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833700 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833767 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833847 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.833974 4842 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.834118 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.834375 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.834927 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.835002 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.834940 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.835518 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.839567 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.839662 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.840290 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-lwbtb"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.843974 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.844982 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.845362 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gc82n"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.845424 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.845636 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.845975 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.846309 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w2kq7"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.846367 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.846353 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.846998 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.847680 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.847760 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848473 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848638 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848740 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848821 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848846 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848662 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848681 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.848969 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.849245 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.849435 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.850708 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-x27gc"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.851092 4842 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-etcd-operator/etcd-operator-b45778765-wqftc"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.854633 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855068 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855822 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855897 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855903 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855938 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855895 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855970 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.855831 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.856248 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.856536 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.856753 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.857045 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.857263 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-d9vpf"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.865997 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.868679 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.869191 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.872528 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.873511 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.876947 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.877642 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878124 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878199 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878307 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878550 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878621 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878675 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878829 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.878932 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.879023 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.879554 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.879573 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.879775 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.879817 4842 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.879865 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880042 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880063 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880395 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880449 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880503 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880627 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880809 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.880628 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.881105 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.881589 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.882390 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.920895 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.921508 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.921564 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.921826 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.922037 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.922466 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 
13:32:08.922637 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.922886 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.923768 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.923931 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924227 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfjpq\" (UniqueName: \"kubernetes.io/projected/f16d8887-9c10-4144-a30d-f09a1feea711-kube-api-access-vfjpq\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924261 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924325 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924400 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924447 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c277b36-785e-4e8f-828e-17e36dac70be-config\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924491 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c277b36-785e-4e8f-828e-17e36dac70be-images\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924559 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72zg9\" (UniqueName: \"kubernetes.io/projected/0c277b36-785e-4e8f-828e-17e36dac70be-kube-api-access-72zg9\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924578 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924601 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-ca\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924636 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f16d8887-9c10-4144-a30d-f09a1feea711-serving-cert\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924656 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-service-ca\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924699 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-config\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924733 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0c277b36-785e-4e8f-828e-17e36dac70be-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.924831 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-client\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.925164 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.925230 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5j9w4"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.925988 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.927814 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.931351 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.933363 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.940723 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.941280 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.941975 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.942462 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.942695 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.942805 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.943482 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.944708 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.945059 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.945182 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.945260 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.948301 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.951535 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.951577 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.953493 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.953815 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.958349 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.968138 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-9x6tf"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.969216 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.970616 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.972407 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.972862 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.973477 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.973713 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.974840 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.975200 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.975424 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9k5gp"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.978431 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-22v98"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.979474 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.980567 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.982143 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jwb8g"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.984182 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.985218 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.987088 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.988278 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.988973 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.991074 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.992357 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.992907 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.993811 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wgl8"] Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.995271 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:08 crc kubenswrapper[4842]: I1111 13:32:08.998145 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.000256 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-jczw6"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.006949 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jkk9p"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.008850 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.010014 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.010787 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.013933 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tncv8"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.015630 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.016708 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.019359 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.020571 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.021966 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gc82n"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.024224 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.025944 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.026661 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-97v95"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028084 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-424kk\" (UniqueName: \"kubernetes.io/projected/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-kube-api-access-424kk\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028150 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c277b36-785e-4e8f-828e-17e36dac70be-images\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028176 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c277b36-785e-4e8f-828e-17e36dac70be-config\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028195 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-etcd-client\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028222 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-policies\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028271 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgrtv\" (UniqueName: \"kubernetes.io/projected/cf04d2ad-3dd4-418c-b9ea-9b749105b467-kube-api-access-qgrtv\") pod \"downloads-7954f5f757-tncv8\" (UID: \"cf04d2ad-3dd4-418c-b9ea-9b749105b467\") " pod="openshift-console/downloads-7954f5f757-tncv8" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028290 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-audit\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028306 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72zg9\" (UniqueName: \"kubernetes.io/projected/0c277b36-785e-4e8f-828e-17e36dac70be-kube-api-access-72zg9\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028324 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f24d13cc-287a-4029-8151-a8a07dccd223-available-featuregates\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028341 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028413 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028424 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028438 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028515 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028541 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-config\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028581 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f16d8887-9c10-4144-a30d-f09a1feea711-serving-cert\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028610 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-service-ca-bundle\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028628 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-config\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028644 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-machine-approver-tls\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028661 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-trusted-ca-bundle\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028677 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64x9q\" (UniqueName: \"kubernetes.io/projected/793581cd-1066-4152-8a30-4004fa059137-kube-api-access-64x9q\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028698 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0c277b36-785e-4e8f-828e-17e36dac70be-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028715 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/793581cd-1066-4152-8a30-4004fa059137-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028732 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4d3fb774-9244-48e1-8733-4f7e199e1c00-serving-cert\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028748 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4ef309b8-4aa2-411b-8620-af612f546585-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qr9r2\" (UID: \"4ef309b8-4aa2-411b-8620-af612f546585\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028764 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028783 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc 
kubenswrapper[4842]: I1111 13:32:09.028799 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-audit-dir\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028830 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-etcd-serving-ca\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028855 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-auth-proxy-config\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028872 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/793581cd-1066-4152-8a30-4004fa059137-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028886 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-encryption-config\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028901 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028919 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-client\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028933 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028949 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kbzk\" (UniqueName: \"kubernetes.io/projected/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-kube-api-access-7kbzk\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.028978 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfjpq\" (UniqueName: \"kubernetes.io/projected/f16d8887-9c10-4144-a30d-f09a1feea711-kube-api-access-vfjpq\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029010 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-config\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029036 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/793581cd-1066-4152-8a30-4004fa059137-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029059 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dsmg\" (UniqueName: \"kubernetes.io/projected/4d3fb774-9244-48e1-8733-4f7e199e1c00-kube-api-access-4dsmg\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029078 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-oauth-serving-cert\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029096 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029139 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-oauth-config\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029153 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-dir\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029171 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029221 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-ca\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlqpc\" (UniqueName: \"kubernetes.io/projected/cc0a9e05-e827-4489-97df-473c19eb2732-kube-api-access-dlqpc\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029268 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-serving-cert\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029304 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-service-ca\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029369 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029412 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-node-pullsecrets\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029446 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029469 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpt8z\" (UniqueName: \"kubernetes.io/projected/a25deeed-3854-4f02-aa77-b7e616f2f2b8-kube-api-access-dpt8z\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029507 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-serving-cert\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029524 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-serving-cert\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029519 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c277b36-785e-4e8f-828e-17e36dac70be-config\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029585 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f24d13cc-287a-4029-8151-a8a07dccd223-serving-cert\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029600 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw9fp\" (UniqueName: \"kubernetes.io/projected/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-kube-api-access-cw9fp\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029637 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfp7f\" (UniqueName: \"kubernetes.io/projected/4ef309b8-4aa2-411b-8620-af612f546585-kube-api-access-lfp7f\") pod \"cluster-samples-operator-665b6dd947-qr9r2\" (UID: \"4ef309b8-4aa2-411b-8620-af612f546585\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029659 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-client-ca\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029680 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029698 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-config\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029735 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-service-ca\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029755 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-config\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029790 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-console-config\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029806 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smcrb\" (UniqueName: \"kubernetes.io/projected/f24d13cc-287a-4029-8151-a8a07dccd223-kube-api-access-smcrb\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029842 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-image-import-ca\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.029898 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c277b36-785e-4e8f-828e-17e36dac70be-images\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: 
\"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.030065 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-lwbtb"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.030087 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-ca\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.030087 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-config\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.030151 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-service-ca\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.031374 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.032618 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.033361 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.034223 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-d9vpf"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.036007 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-x27gc"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.036551 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f16d8887-9c10-4144-a30d-f09a1feea711-etcd-client\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.036916 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0c277b36-785e-4e8f-828e-17e36dac70be-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.037208 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wqftc"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.039687 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/f16d8887-9c10-4144-a30d-f09a1feea711-serving-cert\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.040064 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w2kq7"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.041394 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-svrwd"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.042195 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.042758 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-x55hn"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.043671 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.044076 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9k5gp"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.045446 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jwb8g"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.046930 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.048889 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wgl8"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.049723 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ppkl7"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.051277 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5j9w4"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.052162 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.052383 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.053533 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.054584 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.055887 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.056721 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.057764 4842 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-dns/dns-default-svrwd"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.059084 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.060465 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.061555 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.065700 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.069467 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jkk9p"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.070252 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-22v98"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.071298 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.074208 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-6jh72"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.074939 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.075983 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-6jh72"] Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.091213 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.111694 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.131016 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.131068 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-etcd-client\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.131335 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-policies\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.131366 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" 
(UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.131397 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgrtv\" (UniqueName: \"kubernetes.io/projected/cf04d2ad-3dd4-418c-b9ea-9b749105b467-kube-api-access-qgrtv\") pod \"downloads-7954f5f757-tncv8\" (UID: \"cf04d2ad-3dd4-418c-b9ea-9b749105b467\") " pod="openshift-console/downloads-7954f5f757-tncv8" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.131570 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-audit\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.131614 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f24d13cc-287a-4029-8151-a8a07dccd223-available-featuregates\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132241 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-policies\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132261 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f24d13cc-287a-4029-8151-a8a07dccd223-available-featuregates\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132422 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132465 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132491 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132519 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132542 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-config\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132727 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-service-ca-bundle\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132766 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-machine-approver-tls\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132795 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-trusted-ca-bundle\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132819 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64x9q\" (UniqueName: \"kubernetes.io/projected/793581cd-1066-4152-8a30-4004fa059137-kube-api-access-64x9q\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132843 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/793581cd-1066-4152-8a30-4004fa059137-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132865 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4d3fb774-9244-48e1-8733-4f7e199e1c00-serving-cert\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132886 
4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4ef309b8-4aa2-411b-8620-af612f546585-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qr9r2\" (UID: \"4ef309b8-4aa2-411b-8620-af612f546585\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132907 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132928 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.132995 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-audit-dir\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133034 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-etcd-serving-ca\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133065 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-auth-proxy-config\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133086 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/793581cd-1066-4152-8a30-4004fa059137-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133127 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-encryption-config\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133157 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133180 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kbzk\" (UniqueName: \"kubernetes.io/projected/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-kube-api-access-7kbzk\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133207 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133257 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-config\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133279 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/793581cd-1066-4152-8a30-4004fa059137-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133303 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dsmg\" (UniqueName: \"kubernetes.io/projected/4d3fb774-9244-48e1-8733-4f7e199e1c00-kube-api-access-4dsmg\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133330 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-oauth-serving-cert\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133331 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-config\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133352 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: 
\"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133377 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133412 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-oauth-config\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133434 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-dir\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133446 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133459 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlqpc\" (UniqueName: \"kubernetes.io/projected/cc0a9e05-e827-4489-97df-473c19eb2732-kube-api-access-dlqpc\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133528 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-serving-cert\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133579 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133606 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-node-pullsecrets\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133637 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133661 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpt8z\" (UniqueName: \"kubernetes.io/projected/a25deeed-3854-4f02-aa77-b7e616f2f2b8-kube-api-access-dpt8z\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133662 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133687 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-serving-cert\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133713 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-serving-cert\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133735 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw9fp\" (UniqueName: \"kubernetes.io/projected/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-kube-api-access-cw9fp\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133735 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133762 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f24d13cc-287a-4029-8151-a8a07dccd223-serving-cert\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133792 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfp7f\" (UniqueName: \"kubernetes.io/projected/4ef309b8-4aa2-411b-8620-af612f546585-kube-api-access-lfp7f\") pod \"cluster-samples-operator-665b6dd947-qr9r2\" (UID: 
\"4ef309b8-4aa2-411b-8620-af612f546585\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133816 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-client-ca\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133840 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133863 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-config\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133887 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-service-ca\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133908 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-config\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133923 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-etcd-serving-ca\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133960 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-console-config\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.133985 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smcrb\" (UniqueName: \"kubernetes.io/projected/f24d13cc-287a-4029-8151-a8a07dccd223-kube-api-access-smcrb\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.134009 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-image-import-ca\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.134034 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-424kk\" (UniqueName: \"kubernetes.io/projected/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-kube-api-access-424kk\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.134275 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.134459 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-auth-proxy-config\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.134460 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-node-pullsecrets\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.134878 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-audit\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.135047 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-service-ca-bundle\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.135393 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-etcd-client\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.135429 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.135994 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.136526 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.136929 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-oauth-serving-cert\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.137413 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-serving-cert\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.137650 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/793581cd-1066-4152-8a30-4004fa059137-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.137818 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-machine-approver-tls\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.138129 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-config\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.138145 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d3fb774-9244-48e1-8733-4f7e199e1c00-config\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.138193 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-dir\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.138711 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-oauth-config\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.138808 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-trusted-ca-bundle\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.138910 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-service-ca\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.139017 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.139505 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-console-config\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.139995 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-image-import-ca\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.140044 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f24d13cc-287a-4029-8151-a8a07dccd223-serving-cert\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.140063 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-audit-dir\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.140656 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.140854 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-config\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.141233 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/793581cd-1066-4152-8a30-4004fa059137-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.141365 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.141460 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.141500 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-client-ca\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.141584 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.142419 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.142716 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-encryption-config\") pod \"apiserver-76f77b778f-ppkl7\" (UID: 
\"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.142906 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4ef309b8-4aa2-411b-8620-af612f546585-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qr9r2\" (UID: \"4ef309b8-4aa2-411b-8620-af612f546585\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.144301 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.144392 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4d3fb774-9244-48e1-8733-4f7e199e1c00-serving-cert\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.154086 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-serving-cert\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.156364 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-serving-cert\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.157637 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.171133 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.191542 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.211257 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.231316 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.271065 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.291279 4842 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.311131 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.330993 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.351046 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.391513 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.411709 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.431294 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.451638 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.471352 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.491475 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.511049 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.532387 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.551525 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.571812 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.591191 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.611846 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.631658 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.651773 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.672134 4842 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.691162 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.711094 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.730895 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.751902 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.772504 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.792327 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.811513 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.830916 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.851696 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.870946 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.892204 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.911483 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.931764 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.951267 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.972059 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.990085 4842 request.go:700] Waited for 1.016746122s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 11 13:32:09 crc kubenswrapper[4842]: I1111 13:32:09.991624 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 11 13:32:10 crc 
kubenswrapper[4842]: I1111 13:32:10.010542 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.032343 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.051079 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.072083 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.092649 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.112044 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.131864 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.152172 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.172771 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.190676 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.211044 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.231560 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.251594 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.271402 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.292065 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.311785 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.331869 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.351376 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.371388 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 13:32:10 crc 
kubenswrapper[4842]: I1111 13:32:10.391894 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.411401 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.431185 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.452373 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.471570 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.492007 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.511995 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.542931 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.551590 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.572010 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.591852 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.612157 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.633215 4842 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.651701 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.693614 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfjpq\" (UniqueName: \"kubernetes.io/projected/f16d8887-9c10-4144-a30d-f09a1feea711-kube-api-access-vfjpq\") pod \"etcd-operator-b45778765-wqftc\" (UID: \"f16d8887-9c10-4144-a30d-f09a1feea711\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.719800 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72zg9\" (UniqueName: \"kubernetes.io/projected/0c277b36-785e-4e8f-828e-17e36dac70be-kube-api-access-72zg9\") pod \"machine-api-operator-5694c8668f-97v95\" (UID: \"0c277b36-785e-4e8f-828e-17e36dac70be\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.724895 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.731455 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.751140 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.777562 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.792009 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.811399 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.831801 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.851667 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.870959 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.884908 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.891681 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.925166 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgrtv\" (UniqueName: \"kubernetes.io/projected/cf04d2ad-3dd4-418c-b9ea-9b749105b467-kube-api-access-qgrtv\") pod \"downloads-7954f5f757-tncv8\" (UID: \"cf04d2ad-3dd4-418c-b9ea-9b749105b467\") " pod="openshift-console/downloads-7954f5f757-tncv8" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.944531 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlqpc\" (UniqueName: \"kubernetes.io/projected/cc0a9e05-e827-4489-97df-473c19eb2732-kube-api-access-dlqpc\") pod \"console-f9d7485db-lwbtb\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.965161 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpt8z\" (UniqueName: \"kubernetes.io/projected/a25deeed-3854-4f02-aa77-b7e616f2f2b8-kube-api-access-dpt8z\") pod \"oauth-openshift-558db77b4-gc82n\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.971031 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" Nov 11 13:32:10 crc kubenswrapper[4842]: I1111 13:32:10.986670 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-424kk\" (UniqueName: \"kubernetes.io/projected/371a8cf1-cc94-4f29-bc61-4a98b76f0c58-kube-api-access-424kk\") pod \"machine-approver-56656f9798-lgql9\" (UID: \"371a8cf1-cc94-4f29-bc61-4a98b76f0c58\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.004615 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dsmg\" (UniqueName: \"kubernetes.io/projected/4d3fb774-9244-48e1-8733-4f7e199e1c00-kube-api-access-4dsmg\") pod \"authentication-operator-69f744f599-x27gc\" (UID: \"4d3fb774-9244-48e1-8733-4f7e199e1c00\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.026470 4842 request.go:700] Waited for 1.888131254s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa/token Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.026543 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.043235 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-tncv8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.045924 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smcrb\" (UniqueName: \"kubernetes.io/projected/f24d13cc-287a-4029-8151-a8a07dccd223-kube-api-access-smcrb\") pod \"openshift-config-operator-7777fb866f-bs2dn\" (UID: \"f24d13cc-287a-4029-8151-a8a07dccd223\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.046213 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kbzk\" (UniqueName: \"kubernetes.io/projected/1d6301f1-a0a6-47f7-8fe1-7fa00daa867c-kube-api-access-7kbzk\") pod \"apiserver-76f77b778f-ppkl7\" (UID: \"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c\") " pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.071867 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/793581cd-1066-4152-8a30-4004fa059137-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.087573 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw9fp\" (UniqueName: \"kubernetes.io/projected/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-kube-api-access-cw9fp\") pod \"controller-manager-879f6c89f-w2kq7\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.094472 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.106266 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfp7f\" (UniqueName: \"kubernetes.io/projected/4ef309b8-4aa2-411b-8620-af612f546585-kube-api-access-lfp7f\") pod \"cluster-samples-operator-665b6dd947-qr9r2\" (UID: \"4ef309b8-4aa2-411b-8620-af612f546585\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.117904 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.127303 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.130618 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64x9q\" (UniqueName: \"kubernetes.io/projected/793581cd-1066-4152-8a30-4004fa059137-kube-api-access-64x9q\") pod \"cluster-image-registry-operator-dc59b4c8b-qt9tx\" (UID: \"793581cd-1066-4152-8a30-4004fa059137\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159598 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/659b5e72-ebf6-4446-86f2-402e0ad99bd3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159660 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-client-ca\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159684 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-serving-cert\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159717 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-certificates\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159739 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-trusted-ca\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " 
pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159768 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-bound-sa-token\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159791 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-bound-sa-token\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159812 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/873539c3-26e3-49ba-955d-274d0ad4c803-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159830 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-serving-cert\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159906 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-tls\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159955 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-trusted-ca\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.159981 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrplj\" (UniqueName: \"kubernetes.io/projected/03795f69-f5f2-4f7f-898f-1c87fd4cf567-kube-api-access-zrplj\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160027 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/479b59cc-2cef-4728-a3c8-df498efbeb99-serving-cert\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 
13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160079 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160119 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsj8h\" (UniqueName: \"kubernetes.io/projected/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-kube-api-access-qsj8h\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160151 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c82ml\" (UniqueName: \"kubernetes.io/projected/873539c3-26e3-49ba-955d-274d0ad4c803-kube-api-access-c82ml\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160176 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659b5e72-ebf6-4446-86f2-402e0ad99bd3-config\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160207 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl825\" (UniqueName: \"kubernetes.io/projected/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-kube-api-access-jl825\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-encryption-config\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160270 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/873539c3-26e3-49ba-955d-274d0ad4c803-config\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160304 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-config\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160324 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-audit-policies\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160343 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/03795f69-f5f2-4f7f-898f-1c87fd4cf567-audit-dir\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160369 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-metrics-tls\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160399 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160418 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-trusted-ca\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160453 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-etcd-client\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160478 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160497 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxz7z\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-kube-api-access-zxz7z\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc 
kubenswrapper[4842]: I1111 13:32:11.160524 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-config\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160544 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/659b5e72-ebf6-4446-86f2-402e0ad99bd3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160580 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160613 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq5tv\" (UniqueName: \"kubernetes.io/projected/479b59cc-2cef-4728-a3c8-df498efbeb99-kube-api-access-fq5tv\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160635 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.160759 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.161144 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:11.661130678 +0000 UTC m=+142.321420287 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.176438 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.186937 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wqftc"] Nov 11 13:32:11 crc kubenswrapper[4842]: W1111 13:32:11.206340 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf16d8887_9c10_4144_a30d_f09a1feea711.slice/crio-cb1a596119104e776e12aae0f1abfa1ffe1ddeabe1817897e33c2b4b38a2628b WatchSource:0}: Error finding container cb1a596119104e776e12aae0f1abfa1ffe1ddeabe1817897e33c2b4b38a2628b: Status 404 returned error can't find the container with id cb1a596119104e776e12aae0f1abfa1ffe1ddeabe1817897e33c2b4b38a2628b Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.208935 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-97v95"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261154 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.261272 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:11.761245631 +0000 UTC m=+142.421535250 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261388 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bef6af-57dc-4347-91dc-b57034ef5007-serving-cert\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261433 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq5ch\" (UniqueName: \"kubernetes.io/projected/462ebd3e-c3eb-4e6a-bebd-ee457a956356-kube-api-access-gq5ch\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261460 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-bound-sa-token\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261480 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/1f873d1b-d95b-4b8e-9c04-05821213f2cd-signing-key\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261515 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/873539c3-26e3-49ba-955d-274d0ad4c803-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261530 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvm5z\" (UniqueName: \"kubernetes.io/projected/1f873d1b-d95b-4b8e-9c04-05821213f2cd-kube-api-access-cvm5z\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261547 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-config-volume\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261562 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-tls\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261579 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/462ebd3e-c3eb-4e6a-bebd-ee457a956356-profile-collector-cert\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261594 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/12760c03-9304-4e86-8ff9-d297eda56122-srv-cert\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261619 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-trusted-ca\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261636 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/22efe903-5517-47b3-9889-9ea704b2c39f-proxy-tls\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261655 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-plugins-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261688 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrplj\" (UniqueName: \"kubernetes.io/projected/03795f69-f5f2-4f7f-898f-1c87fd4cf567-kube-api-access-zrplj\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261724 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-default-certificate\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261742 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-trusted-ca-bundle\") pod 
\"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261760 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsj8h\" (UniqueName: \"kubernetes.io/projected/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-kube-api-access-qsj8h\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261776 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12b41511-3266-433f-9580-b102a55a087b-service-ca-bundle\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261795 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqxm6\" (UniqueName: \"kubernetes.io/projected/b9ef1fc5-8627-46f3-a164-e012c7dc4934-kube-api-access-cqxm6\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261847 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-secret-volume\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261875 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-mountpoint-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261904 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c82ml\" (UniqueName: \"kubernetes.io/projected/873539c3-26e3-49ba-955d-274d0ad4c803-kube-api-access-c82ml\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.261928 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7vbp\" (UniqueName: \"kubernetes.io/projected/7f1f3854-cd3e-4e04-bdcf-ea8bcec64728-kube-api-access-f7vbp\") pod \"package-server-manager-789f6589d5-p8sjn\" (UID: \"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.262197 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22efe903-5517-47b3-9889-9ea704b2c39f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: 
\"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.262227 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/462ebd3e-c3eb-4e6a-bebd-ee457a956356-srv-cert\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.262246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db5pt\" (UniqueName: \"kubernetes.io/projected/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-kube-api-access-db5pt\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.262282 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e4ac96a0-0de8-47d1-b101-4054af9c7fe0-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6tqv7\" (UID: \"e4ac96a0-0de8-47d1-b101-4054af9c7fe0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.262340 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/1f873d1b-d95b-4b8e-9c04-05821213f2cd-signing-cabundle\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.262359 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.262560 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263143 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/873539c3-26e3-49ba-955d-274d0ad4c803-config\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263172 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b9ef1fc5-8627-46f3-a164-e012c7dc4934-certs\") pod \"machine-config-server-x55hn\" (UID: 
\"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263247 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-audit-policies\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263266 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/03795f69-f5f2-4f7f-898f-1c87fd4cf567-audit-dir\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263283 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf00a84f-494a-4ab6-8336-d08e94ddfe70-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263316 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263401 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-trusted-ca\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263419 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263438 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b8785d2-6888-4f85-bdbb-d58f0964e489-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263468 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r6tr\" (UniqueName: \"kubernetes.io/projected/3b8785d2-6888-4f85-bdbb-d58f0964e489-kube-api-access-7r6tr\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: 
\"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263484 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/12760c03-9304-4e86-8ff9-d297eda56122-profile-collector-cert\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263509 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-etcd-client\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263560 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckxwp\" (UniqueName: \"kubernetes.io/projected/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-kube-api-access-ckxwp\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263579 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a935336a-9188-4379-abeb-df3b0387281e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263595 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.263634 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265316 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxz7z\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-kube-api-access-zxz7z\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.264928 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-audit-policies\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: 
\"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.264952 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/03795f69-f5f2-4f7f-898f-1c87fd4cf567-audit-dir\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.264877 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/873539c3-26e3-49ba-955d-274d0ad4c803-config\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265461 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-config\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.265640 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:11.765625776 +0000 UTC m=+142.425915395 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265681 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfm2w\" (UniqueName: \"kubernetes.io/projected/b0401a69-94e4-473f-8f8a-795a1eeb7e9a-kube-api-access-pfm2w\") pod \"ingress-canary-6jh72\" (UID: \"b0401a69-94e4-473f-8f8a-795a1eeb7e9a\") " pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265703 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bf00a84f-494a-4ab6-8336-d08e94ddfe70-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265789 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq5tv\" (UniqueName: \"kubernetes.io/projected/479b59cc-2cef-4728-a3c8-df498efbeb99-kube-api-access-fq5tv\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265809 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ccml\" (UniqueName: \"kubernetes.io/projected/f9740f8b-8b9e-4ffa-a716-20d83abf1362-kube-api-access-7ccml\") pod \"dns-operator-744455d44c-9k5gp\" (UID: \"f9740f8b-8b9e-4ffa-a716-20d83abf1362\") " pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265827 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29227\" (UniqueName: \"kubernetes.io/projected/12760c03-9304-4e86-8ff9-d297eda56122-kube-api-access-29227\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265884 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b9ef1fc5-8627-46f3-a164-e012c7dc4934-node-bootstrap-token\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265900 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf00a84f-494a-4ab6-8336-d08e94ddfe70-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265937 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/659b5e72-ebf6-4446-86f2-402e0ad99bd3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265942 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-trusted-ca\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.265954 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-stats-auth\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266165 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/5df99cba-50a8-40bf-be6d-93b43eccd4ea-tmpfs\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266230 4842 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-serving-cert\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266256 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/033d2864-f866-455b-ab6d-9345c366ed86-metrics-tls\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266318 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-certificates\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266346 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f9740f8b-8b9e-4ffa-a716-20d83abf1362-metrics-tls\") pod \"dns-operator-744455d44c-9k5gp\" (UID: \"f9740f8b-8b9e-4ffa-a716-20d83abf1362\") " pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266373 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-registration-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266375 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-tls\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266481 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-trusted-ca\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266512 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-metrics-certs\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266536 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92f22\" (UniqueName: \"kubernetes.io/projected/a935336a-9188-4379-abeb-df3b0387281e-kube-api-access-92f22\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266563 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snscg\" (UniqueName: \"kubernetes.io/projected/5df99cba-50a8-40bf-be6d-93b43eccd4ea-kube-api-access-snscg\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266587 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72a452f5-5d0f-4728-88b4-0d6bdeaf3149-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5j9w4\" (UID: \"72a452f5-5d0f-4728-88b4-0d6bdeaf3149\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266614 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-config\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266635 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-bound-sa-token\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266636 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-config\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.266675 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-serving-cert\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267022 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dxfl\" (UniqueName: \"kubernetes.io/projected/37480fb1-03b9-4913-a336-c18363b1e85e-kube-api-access-9dxfl\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267076 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/479b59cc-2cef-4728-a3c8-df498efbeb99-serving-cert\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc 
kubenswrapper[4842]: I1111 13:32:11.267158 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a935336a-9188-4379-abeb-df3b0387281e-proxy-tls\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267236 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-socket-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267264 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7f1f3854-cd3e-4e04-bdcf-ea8bcec64728-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-p8sjn\" (UID: \"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267307 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659b5e72-ebf6-4446-86f2-402e0ad99bd3-config\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267332 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmsm9\" (UniqueName: \"kubernetes.io/projected/12b41511-3266-433f-9580-b102a55a087b-kube-api-access-xmsm9\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267625 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl825\" (UniqueName: \"kubernetes.io/projected/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-kube-api-access-jl825\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267667 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b0401a69-94e4-473f-8f8a-795a1eeb7e9a-cert\") pod \"ingress-canary-6jh72\" (UID: \"b0401a69-94e4-473f-8f8a-795a1eeb7e9a\") " pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267694 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5df99cba-50a8-40bf-be6d-93b43eccd4ea-webhook-cert\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267719 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/033d2864-f866-455b-ab6d-9345c366ed86-config-volume\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267773 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-encryption-config\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267809 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267825 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267855 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-config\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267871 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267899 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-metrics-tls\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.267929 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v966c\" (UniqueName: \"kubernetes.io/projected/d5bef6af-57dc-4347-91dc-b57034ef5007-kube-api-access-v966c\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268086 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-etcd-client\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268091 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7s7sv\" (UniqueName: \"kubernetes.io/projected/22efe903-5517-47b3-9889-9ea704b2c39f-kube-api-access-7s7sv\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268175 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/659b5e72-ebf6-4446-86f2-402e0ad99bd3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268194 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tv48r\" (UniqueName: \"kubernetes.io/projected/033d2864-f866-455b-ab6d-9345c366ed86-kube-api-access-tv48r\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268260 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-297pv\" (UniqueName: \"kubernetes.io/projected/2f5f7d70-34ae-4eff-a77c-5f5736b80ea6-kube-api-access-297pv\") pod \"migrator-59844c95c7-zpgpg\" (UID: \"2f5f7d70-34ae-4eff-a77c-5f5736b80ea6\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268285 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268303 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6hgz\" (UniqueName: \"kubernetes.io/projected/72a452f5-5d0f-4728-88b4-0d6bdeaf3149-kube-api-access-g6hgz\") pod \"multus-admission-controller-857f4d67dd-5j9w4\" (UID: \"72a452f5-5d0f-4728-88b4-0d6bdeaf3149\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268322 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bef6af-57dc-4347-91dc-b57034ef5007-config\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268340 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvd79\" (UniqueName: 
\"kubernetes.io/projected/e4ac96a0-0de8-47d1-b101-4054af9c7fe0-kube-api-access-zvd79\") pod \"control-plane-machine-set-operator-78cbb6b69f-6tqv7\" (UID: \"e4ac96a0-0de8-47d1-b101-4054af9c7fe0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268358 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gphct\" (UniqueName: \"kubernetes.io/projected/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-kube-api-access-gphct\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268392 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268424 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/22efe903-5517-47b3-9889-9ea704b2c39f-images\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268441 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-csi-data-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268470 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-client-ca\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268487 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5df99cba-50a8-40bf-be6d-93b43eccd4ea-apiservice-cert\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.268508 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b8785d2-6888-4f85-bdbb-d58f0964e489-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.269489 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/659b5e72-ebf6-4446-86f2-402e0ad99bd3-config\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.269878 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-installation-pull-secrets\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.269903 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-ca-trust-extracted\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.270275 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-certificates\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.271238 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-client-ca\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.271762 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/873539c3-26e3-49ba-955d-274d0ad4c803-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.271912 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/479b59cc-2cef-4728-a3c8-df498efbeb99-serving-cert\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.272217 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-trusted-ca\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.272934 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-metrics-tls\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.273086 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-config\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.273931 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-serving-cert\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.274463 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-encryption-config\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.275471 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/659b5e72-ebf6-4446-86f2-402e0ad99bd3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.276635 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03795f69-f5f2-4f7f-898f-1c87fd4cf567-serving-cert\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.276695 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/03795f69-f5f2-4f7f-898f-1c87fd4cf567-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.281034 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-trusted-ca\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.311075 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-bound-sa-token\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.311447 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-x27gc"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 
13:32:11.330295 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c82ml\" (UniqueName: \"kubernetes.io/projected/873539c3-26e3-49ba-955d-274d0ad4c803-kube-api-access-c82ml\") pod \"openshift-apiserver-operator-796bbdcf4f-slt62\" (UID: \"873539c3-26e3-49ba-955d-274d0ad4c803\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.352300 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.356820 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsj8h\" (UniqueName: \"kubernetes.io/projected/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-kube-api-access-qsj8h\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.359465 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.367345 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-lwbtb"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370590 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370719 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-secret-volume\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370746 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-mountpoint-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370768 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7vbp\" (UniqueName: \"kubernetes.io/projected/7f1f3854-cd3e-4e04-bdcf-ea8bcec64728-kube-api-access-f7vbp\") pod \"package-server-manager-789f6589d5-p8sjn\" (UID: \"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370789 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db5pt\" (UniqueName: \"kubernetes.io/projected/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-kube-api-access-db5pt\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370810 4842 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e4ac96a0-0de8-47d1-b101-4054af9c7fe0-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6tqv7\" (UID: \"e4ac96a0-0de8-47d1-b101-4054af9c7fe0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370836 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22efe903-5517-47b3-9889-9ea704b2c39f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370857 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/462ebd3e-c3eb-4e6a-bebd-ee457a956356-srv-cert\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370879 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/1f873d1b-d95b-4b8e-9c04-05821213f2cd-signing-cabundle\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370900 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370921 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b9ef1fc5-8627-46f3-a164-e012c7dc4934-certs\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370944 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf00a84f-494a-4ab6-8336-d08e94ddfe70-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370977 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.370999 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b8785d2-6888-4f85-bdbb-d58f0964e489-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371021 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r6tr\" (UniqueName: \"kubernetes.io/projected/3b8785d2-6888-4f85-bdbb-d58f0964e489-kube-api-access-7r6tr\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371042 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/12760c03-9304-4e86-8ff9-d297eda56122-profile-collector-cert\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371074 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckxwp\" (UniqueName: \"kubernetes.io/projected/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-kube-api-access-ckxwp\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371090 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a935336a-9188-4379-abeb-df3b0387281e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371136 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371161 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfm2w\" (UniqueName: \"kubernetes.io/projected/b0401a69-94e4-473f-8f8a-795a1eeb7e9a-kube-api-access-pfm2w\") pod \"ingress-canary-6jh72\" (UID: \"b0401a69-94e4-473f-8f8a-795a1eeb7e9a\") " pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371178 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bf00a84f-494a-4ab6-8336-d08e94ddfe70-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371196 4842 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29227\" (UniqueName: \"kubernetes.io/projected/12760c03-9304-4e86-8ff9-d297eda56122-kube-api-access-29227\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371217 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ccml\" (UniqueName: \"kubernetes.io/projected/f9740f8b-8b9e-4ffa-a716-20d83abf1362-kube-api-access-7ccml\") pod \"dns-operator-744455d44c-9k5gp\" (UID: \"f9740f8b-8b9e-4ffa-a716-20d83abf1362\") " pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371233 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b9ef1fc5-8627-46f3-a164-e012c7dc4934-node-bootstrap-token\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371248 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf00a84f-494a-4ab6-8336-d08e94ddfe70-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371264 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-stats-auth\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371279 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/5df99cba-50a8-40bf-be6d-93b43eccd4ea-tmpfs\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371302 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/033d2864-f866-455b-ab6d-9345c366ed86-metrics-tls\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371316 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f9740f8b-8b9e-4ffa-a716-20d83abf1362-metrics-tls\") pod \"dns-operator-744455d44c-9k5gp\" (UID: \"f9740f8b-8b9e-4ffa-a716-20d83abf1362\") " pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371331 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-registration-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " 
pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371347 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92f22\" (UniqueName: \"kubernetes.io/projected/a935336a-9188-4379-abeb-df3b0387281e-kube-api-access-92f22\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371364 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snscg\" (UniqueName: \"kubernetes.io/projected/5df99cba-50a8-40bf-be6d-93b43eccd4ea-kube-api-access-snscg\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371384 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-metrics-certs\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371401 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72a452f5-5d0f-4728-88b4-0d6bdeaf3149-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5j9w4\" (UID: \"72a452f5-5d0f-4728-88b4-0d6bdeaf3149\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371423 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-config\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371440 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dxfl\" (UniqueName: \"kubernetes.io/projected/37480fb1-03b9-4913-a336-c18363b1e85e-kube-api-access-9dxfl\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371456 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a935336a-9188-4379-abeb-df3b0387281e-proxy-tls\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371478 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7f1f3854-cd3e-4e04-bdcf-ea8bcec64728-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-p8sjn\" (UID: \"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:11 crc 
kubenswrapper[4842]: I1111 13:32:11.371495 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-socket-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371514 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmsm9\" (UniqueName: \"kubernetes.io/projected/12b41511-3266-433f-9580-b102a55a087b-kube-api-access-xmsm9\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371543 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b0401a69-94e4-473f-8f8a-795a1eeb7e9a-cert\") pod \"ingress-canary-6jh72\" (UID: \"b0401a69-94e4-473f-8f8a-795a1eeb7e9a\") " pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371558 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5df99cba-50a8-40bf-be6d-93b43eccd4ea-webhook-cert\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371576 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/033d2864-f866-455b-ab6d-9345c366ed86-config-volume\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371607 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371625 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371644 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371678 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v966c\" (UniqueName: \"kubernetes.io/projected/d5bef6af-57dc-4347-91dc-b57034ef5007-kube-api-access-v966c\") pod \"service-ca-operator-777779d784-22v98\" (UID: 
\"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371702 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7s7sv\" (UniqueName: \"kubernetes.io/projected/22efe903-5517-47b3-9889-9ea704b2c39f-kube-api-access-7s7sv\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371725 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tv48r\" (UniqueName: \"kubernetes.io/projected/033d2864-f866-455b-ab6d-9345c366ed86-kube-api-access-tv48r\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371741 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-297pv\" (UniqueName: \"kubernetes.io/projected/2f5f7d70-34ae-4eff-a77c-5f5736b80ea6-kube-api-access-297pv\") pod \"migrator-59844c95c7-zpgpg\" (UID: \"2f5f7d70-34ae-4eff-a77c-5f5736b80ea6\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371757 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvd79\" (UniqueName: \"kubernetes.io/projected/e4ac96a0-0de8-47d1-b101-4054af9c7fe0-kube-api-access-zvd79\") pod \"control-plane-machine-set-operator-78cbb6b69f-6tqv7\" (UID: \"e4ac96a0-0de8-47d1-b101-4054af9c7fe0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371774 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6hgz\" (UniqueName: \"kubernetes.io/projected/72a452f5-5d0f-4728-88b4-0d6bdeaf3149-kube-api-access-g6hgz\") pod \"multus-admission-controller-857f4d67dd-5j9w4\" (UID: \"72a452f5-5d0f-4728-88b4-0d6bdeaf3149\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371788 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bef6af-57dc-4347-91dc-b57034ef5007-config\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371804 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gphct\" (UniqueName: \"kubernetes.io/projected/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-kube-api-access-gphct\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371820 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-csi-data-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " 
pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371842 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/22efe903-5517-47b3-9889-9ea704b2c39f-images\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371859 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5df99cba-50a8-40bf-be6d-93b43eccd4ea-apiservice-cert\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371875 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b8785d2-6888-4f85-bdbb-d58f0964e489-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371893 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bef6af-57dc-4347-91dc-b57034ef5007-serving-cert\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371912 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq5ch\" (UniqueName: \"kubernetes.io/projected/462ebd3e-c3eb-4e6a-bebd-ee457a956356-kube-api-access-gq5ch\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371928 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/1f873d1b-d95b-4b8e-9c04-05821213f2cd-signing-key\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371945 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvm5z\" (UniqueName: \"kubernetes.io/projected/1f873d1b-d95b-4b8e-9c04-05821213f2cd-kube-api-access-cvm5z\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371959 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-config-volume\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371976 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/462ebd3e-c3eb-4e6a-bebd-ee457a956356-profile-collector-cert\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.371990 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/12760c03-9304-4e86-8ff9-d297eda56122-srv-cert\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.372004 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-plugins-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.372021 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/22efe903-5517-47b3-9889-9ea704b2c39f-proxy-tls\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.372041 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-default-certificate\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.372058 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12b41511-3266-433f-9580-b102a55a087b-service-ca-bundle\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.372072 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqxm6\" (UniqueName: \"kubernetes.io/projected/b9ef1fc5-8627-46f3-a164-e012c7dc4934-kube-api-access-cqxm6\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.373376 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf00a84f-494a-4ab6-8336-d08e94ddfe70-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.373471 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-11 13:32:11.87345719 +0000 UTC m=+142.533746809 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.374264 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrplj\" (UniqueName: \"kubernetes.io/projected/03795f69-f5f2-4f7f-898f-1c87fd4cf567-kube-api-access-zrplj\") pod \"apiserver-7bbb656c7d-krvwl\" (UID: \"03795f69-f5f2-4f7f-898f-1c87fd4cf567\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.374652 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.374765 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/5df99cba-50a8-40bf-be6d-93b43eccd4ea-tmpfs\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.374996 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/1f873d1b-d95b-4b8e-9c04-05821213f2cd-signing-cabundle\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.375005 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22efe903-5517-47b3-9889-9ea704b2c39f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.375153 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-mountpoint-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.376730 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/033d2864-f866-455b-ab6d-9345c366ed86-config-volume\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.376954 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/b0401a69-94e4-473f-8f8a-795a1eeb7e9a-cert\") pod \"ingress-canary-6jh72\" (UID: \"b0401a69-94e4-473f-8f8a-795a1eeb7e9a\") " pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.377090 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/22efe903-5517-47b3-9889-9ea704b2c39f-images\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.377548 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-config-volume\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.378570 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b8785d2-6888-4f85-bdbb-d58f0964e489-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.379622 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a935336a-9188-4379-abeb-df3b0387281e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.382196 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-config\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.382822 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5df99cba-50a8-40bf-be6d-93b43eccd4ea-webhook-cert\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.382919 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-secret-volume\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.383895 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bef6af-57dc-4347-91dc-b57034ef5007-serving-cert\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 
11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.384296 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e4ac96a0-0de8-47d1-b101-4054af9c7fe0-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6tqv7\" (UID: \"e4ac96a0-0de8-47d1-b101-4054af9c7fe0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.385424 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-socket-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.388312 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/462ebd3e-c3eb-4e6a-bebd-ee457a956356-srv-cert\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: W1111 13:32:11.388862 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc0a9e05_e827_4489_97df_473c19eb2732.slice/crio-2fa3d53315dd4e6c8c88553a1a33883aa8c204a1e7a0288d469c8d5d2907c1ad WatchSource:0}: Error finding container 2fa3d53315dd4e6c8c88553a1a33883aa8c204a1e7a0288d469c8d5d2907c1ad: Status 404 returned error can't find the container with id 2fa3d53315dd4e6c8c88553a1a33883aa8c204a1e7a0288d469c8d5d2907c1ad Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.389529 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12b41511-3266-433f-9580-b102a55a087b-service-ca-bundle\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.390197 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-registration-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.390569 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/1f873d1b-d95b-4b8e-9c04-05821213f2cd-signing-key\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.390641 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-plugins-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.391373 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.391886 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-metrics-certs\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.392272 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-csi-data-dir\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.392803 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bef6af-57dc-4347-91dc-b57034ef5007-config\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.392898 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/7f1f3854-cd3e-4e04-bdcf-ea8bcec64728-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-p8sjn\" (UID: \"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.392965 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5df99cba-50a8-40bf-be6d-93b43eccd4ea-apiservice-cert\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.393004 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a935336a-9188-4379-abeb-df3b0387281e-proxy-tls\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.396809 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.400706 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b8785d2-6888-4f85-bdbb-d58f0964e489-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.401075 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/033d2864-f866-455b-ab6d-9345c366ed86-metrics-tls\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.402076 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-default-certificate\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.402306 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/12760c03-9304-4e86-8ff9-d297eda56122-srv-cert\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.402649 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/22efe903-5517-47b3-9889-9ea704b2c39f-proxy-tls\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.402795 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/72a452f5-5d0f-4728-88b4-0d6bdeaf3149-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5j9w4\" (UID: \"72a452f5-5d0f-4728-88b4-0d6bdeaf3149\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.402873 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/12760c03-9304-4e86-8ff9-d297eda56122-profile-collector-cert\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.403024 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf00a84f-494a-4ab6-8336-d08e94ddfe70-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.403296 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.403313 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/462ebd3e-c3eb-4e6a-bebd-ee457a956356-profile-collector-cert\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.403330 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f9740f8b-8b9e-4ffa-a716-20d83abf1362-metrics-tls\") pod \"dns-operator-744455d44c-9k5gp\" (UID: \"f9740f8b-8b9e-4ffa-a716-20d83abf1362\") " pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.403640 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b9ef1fc5-8627-46f3-a164-e012c7dc4934-certs\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.404697 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxz7z\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-kube-api-access-zxz7z\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.404917 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.406571 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/12b41511-3266-433f-9580-b102a55a087b-stats-auth\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.409782 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.410164 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b9ef1fc5-8627-46f3-a164-e012c7dc4934-node-bootstrap-token\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.413680 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gc82n"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.416866 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.420064 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn"] Nov 11 13:32:11 
crc kubenswrapper[4842]: I1111 13:32:11.422393 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3ace4a93-471f-4bc1-aded-b6aadf15f3c1-bound-sa-token\") pod \"ingress-operator-5b745b69d9-95x5l\" (UID: \"3ace4a93-471f-4bc1-aded-b6aadf15f3c1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.428016 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl825\" (UniqueName: \"kubernetes.io/projected/ef8bc024-9a73-46be-9d0a-853e3ca0b0cb-kube-api-access-jl825\") pod \"console-operator-58897d9998-jczw6\" (UID: \"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb\") " pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.448749 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/659b5e72-ebf6-4446-86f2-402e0ad99bd3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-tb95g\" (UID: \"659b5e72-ebf6-4446-86f2-402e0ad99bd3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.470818 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq5tv\" (UniqueName: \"kubernetes.io/projected/479b59cc-2cef-4728-a3c8-df498efbeb99-kube-api-access-fq5tv\") pod \"route-controller-manager-6576b87f9c-v984h\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.470975 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tncv8"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.473745 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w2kq7"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.474358 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.474741 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:11.974728938 +0000 UTC m=+142.635018547 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.490633 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.498377 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.509814 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqxm6\" (UniqueName: \"kubernetes.io/projected/b9ef1fc5-8627-46f3-a164-e012c7dc4934-kube-api-access-cqxm6\") pod \"machine-config-server-x55hn\" (UID: \"b9ef1fc5-8627-46f3-a164-e012c7dc4934\") " pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.524897 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfm2w\" (UniqueName: \"kubernetes.io/projected/b0401a69-94e4-473f-8f8a-795a1eeb7e9a-kube-api-access-pfm2w\") pod \"ingress-canary-6jh72\" (UID: \"b0401a69-94e4-473f-8f8a-795a1eeb7e9a\") " pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.539129 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.544560 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r6tr\" (UniqueName: \"kubernetes.io/projected/3b8785d2-6888-4f85-bdbb-d58f0964e489-kube-api-access-7r6tr\") pod \"openshift-controller-manager-operator-756b6f6bc6-zgv6x\" (UID: \"3b8785d2-6888-4f85-bdbb-d58f0964e489\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.553456 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.568666 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7vbp\" (UniqueName: \"kubernetes.io/projected/7f1f3854-cd3e-4e04-bdcf-ea8bcec64728-kube-api-access-f7vbp\") pod \"package-server-manager-789f6589d5-p8sjn\" (UID: \"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.575757 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.575908 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.075882743 +0000 UTC m=+142.736172362 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.576171 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.576479 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.076466981 +0000 UTC m=+142.736756600 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.587428 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db5pt\" (UniqueName: \"kubernetes.io/projected/ea6fb48c-8a20-46c1-a4d0-30ffb0040560-kube-api-access-db5pt\") pod \"csi-hostpathplugin-jkk9p\" (UID: \"ea6fb48c-8a20-46c1-a4d0-30ffb0040560\") " pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.591630 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ppkl7"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.599666 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.603992 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq5ch\" (UniqueName: \"kubernetes.io/projected/462ebd3e-c3eb-4e6a-bebd-ee457a956356-kube-api-access-gq5ch\") pod \"olm-operator-6b444d44fb-qr7hq\" (UID: \"462ebd3e-c3eb-4e6a-bebd-ee457a956356\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: W1111 13:32:11.608260 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf24d13cc_287a_4029_8151_a8a07dccd223.slice/crio-8ab235db8bf47a08f2696ce8a3dfeba4ac5fcc6be874704d378852ad9f2cbbde WatchSource:0}: Error finding container 8ab235db8bf47a08f2696ce8a3dfeba4ac5fcc6be874704d378852ad9f2cbbde: Status 404 returned error can't find the container with id 8ab235db8bf47a08f2696ce8a3dfeba4ac5fcc6be874704d378852ad9f2cbbde Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.617632 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx"] Nov 11 13:32:11 crc kubenswrapper[4842]: W1111 13:32:11.618731 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d6301f1_a0a6_47f7_8fe1_7fa00daa867c.slice/crio-35d29be23aea161c89b3a00b568ba02c31611d5b1418fe60400a53a3cb726399 WatchSource:0}: Error finding container 35d29be23aea161c89b3a00b568ba02c31611d5b1418fe60400a53a3cb726399: Status 404 returned error can't find the container with id 35d29be23aea161c89b3a00b568ba02c31611d5b1418fe60400a53a3cb726399 Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.625547 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffddac0e-5b23-4ced-93a0-6045f2d8a12f-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8fdf6\" (UID: \"ffddac0e-5b23-4ced-93a0-6045f2d8a12f\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: W1111 13:32:11.635566 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod793581cd_1066_4152_8a30_4004fa059137.slice/crio-4e70daef048adee18130375f6de2dea3469103a8024ed3afdf73eb9ec176d9f8 WatchSource:0}: Error finding container 4e70daef048adee18130375f6de2dea3469103a8024ed3afdf73eb9ec176d9f8: Status 404 returned error can't find the container with id 4e70daef048adee18130375f6de2dea3469103a8024ed3afdf73eb9ec176d9f8 Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.644697 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckxwp\" (UniqueName: \"kubernetes.io/projected/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-kube-api-access-ckxwp\") pod \"collect-profiles-29381130-ldl4m\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.666026 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.667127 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dxfl\" (UniqueName: \"kubernetes.io/projected/37480fb1-03b9-4913-a336-c18363b1e85e-kube-api-access-9dxfl\") pod \"marketplace-operator-79b997595-2wgl8\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.673606 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.683075 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.683673 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.183657344 +0000 UTC m=+142.843946963 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.684230 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.688196 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.691341 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvm5z\" (UniqueName: \"kubernetes.io/projected/1f873d1b-d95b-4b8e-9c04-05821213f2cd-kube-api-access-cvm5z\") pod \"service-ca-9c57cc56f-jwb8g\" (UID: \"1f873d1b-d95b-4b8e-9c04-05821213f2cd\") " pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.707255 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bf00a84f-494a-4ab6-8336-d08e94ddfe70-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ztl26\" (UID: \"bf00a84f-494a-4ab6-8336-d08e94ddfe70\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.709018 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.723292 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-x55hn" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.727421 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29227\" (UniqueName: \"kubernetes.io/projected/12760c03-9304-4e86-8ff9-d297eda56122-kube-api-access-29227\") pod \"catalog-operator-68c6474976-mhlzl\" (UID: \"12760c03-9304-4e86-8ff9-d297eda56122\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.729962 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-6jh72" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.745429 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ccml\" (UniqueName: \"kubernetes.io/projected/f9740f8b-8b9e-4ffa-a716-20d83abf1362-kube-api-access-7ccml\") pod \"dns-operator-744455d44c-9k5gp\" (UID: \"f9740f8b-8b9e-4ffa-a716-20d83abf1362\") " pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.751305 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.766076 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92f22\" (UniqueName: \"kubernetes.io/projected/a935336a-9188-4379-abeb-df3b0387281e-kube-api-access-92f22\") pod \"machine-config-controller-84d6567774-2vg2q\" (UID: \"a935336a-9188-4379-abeb-df3b0387281e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.784473 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.784820 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.284802709 +0000 UTC m=+142.945092328 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.785296 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snscg\" (UniqueName: \"kubernetes.io/projected/5df99cba-50a8-40bf-be6d-93b43eccd4ea-kube-api-access-snscg\") pod \"packageserver-d55dfcdfc-hw59z\" (UID: \"5df99cba-50a8-40bf-be6d-93b43eccd4ea\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.809595 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.821712 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v966c\" (UniqueName: \"kubernetes.io/projected/d5bef6af-57dc-4347-91dc-b57034ef5007-kube-api-access-v966c\") pod \"service-ca-operator-777779d784-22v98\" (UID: \"d5bef6af-57dc-4347-91dc-b57034ef5007\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.825540 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.826015 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-297pv\" (UniqueName: \"kubernetes.io/projected/2f5f7d70-34ae-4eff-a77c-5f5736b80ea6-kube-api-access-297pv\") pod \"migrator-59844c95c7-zpgpg\" (UID: \"2f5f7d70-34ae-4eff-a77c-5f5736b80ea6\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.848902 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7s7sv\" (UniqueName: \"kubernetes.io/projected/22efe903-5517-47b3-9889-9ea704b2c39f-kube-api-access-7s7sv\") pod \"machine-config-operator-74547568cd-56fvg\" (UID: \"22efe903-5517-47b3-9889-9ea704b2c39f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.850062 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" event={"ID":"a25deeed-3854-4f02-aa77-b7e616f2f2b8","Type":"ContainerStarted","Data":"4df0f5d72c297fc1c2b3d8f284ae30311fc4821656ce1602d58077fae2a07d0b"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.850955 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lwbtb" event={"ID":"cc0a9e05-e827-4489-97df-473c19eb2732","Type":"ContainerStarted","Data":"2fa3d53315dd4e6c8c88553a1a33883aa8c204a1e7a0288d469c8d5d2907c1ad"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.851727 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" 
event={"ID":"f16d8887-9c10-4144-a30d-f09a1feea711","Type":"ContainerStarted","Data":"cb1a596119104e776e12aae0f1abfa1ffe1ddeabe1817897e33c2b4b38a2628b"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.853545 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" event={"ID":"793581cd-1066-4152-8a30-4004fa059137","Type":"ContainerStarted","Data":"4e70daef048adee18130375f6de2dea3469103a8024ed3afdf73eb9ec176d9f8"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.854768 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tncv8" event={"ID":"cf04d2ad-3dd4-418c-b9ea-9b749105b467","Type":"ContainerStarted","Data":"952f8a6e9c41b2f5b8163259ea06aec5cee9506c25f85380a8c5b9dc2cae549a"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.869723 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.872412 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tv48r\" (UniqueName: \"kubernetes.io/projected/033d2864-f866-455b-ab6d-9345c366ed86-kube-api-access-tv48r\") pod \"dns-default-svrwd\" (UID: \"033d2864-f866-455b-ab6d-9345c366ed86\") " pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.878995 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.880619 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" event={"ID":"0c277b36-785e-4e8f-828e-17e36dac70be","Type":"ContainerStarted","Data":"e20ae4e66281a0d2e3b017a4d52002d4fc2181de4ff86239357bc5d18a8bde58"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.881555 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" event={"ID":"f24d13cc-287a-4029-8151-a8a07dccd223","Type":"ContainerStarted","Data":"8ab235db8bf47a08f2696ce8a3dfeba4ac5fcc6be874704d378852ad9f2cbbde"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.882459 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" event={"ID":"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c","Type":"ContainerStarted","Data":"35d29be23aea161c89b3a00b568ba02c31611d5b1418fe60400a53a3cb726399"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.883196 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" event={"ID":"4d3fb774-9244-48e1-8733-4f7e199e1c00","Type":"ContainerStarted","Data":"24280f69d1a18c05c0814c9308a8110fa4581cd072b9bccdb08c419517c5c9bb"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.883918 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" event={"ID":"f34c36d1-6785-42b0-8b27-4ba2e00c4db1","Type":"ContainerStarted","Data":"efda1bc6ee03189da718ca0f50d31ec070a1779aee1bef967264a4a1b990a1a3"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.884601 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" 
event={"ID":"371a8cf1-cc94-4f29-bc61-4a98b76f0c58","Type":"ContainerStarted","Data":"7aec49c093796349ca9493ca9f2d3825ae42b2ce9f3c3b6313c1a22b16b56177"} Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.884780 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.885067 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.885195 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.385173179 +0000 UTC m=+143.045462798 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.885255 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.885517 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.38550988 +0000 UTC m=+143.045799489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.886158 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvd79\" (UniqueName: \"kubernetes.io/projected/e4ac96a0-0de8-47d1-b101-4054af9c7fe0-kube-api-access-zvd79\") pod \"control-plane-machine-set-operator-78cbb6b69f-6tqv7\" (UID: \"e4ac96a0-0de8-47d1-b101-4054af9c7fe0\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.892417 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.906832 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.912655 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gphct\" (UniqueName: \"kubernetes.io/projected/c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6-kube-api-access-gphct\") pod \"kube-storage-version-migrator-operator-b67b599dd-85wk5\" (UID: \"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.917974 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.923778 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.927715 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmsm9\" (UniqueName: \"kubernetes.io/projected/12b41511-3266-433f-9580-b102a55a087b-kube-api-access-xmsm9\") pod \"router-default-5444994796-9x6tf\" (UID: \"12b41511-3266-433f-9580-b102a55a087b\") " pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.929793 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.939793 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.944365 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.951015 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.953764 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6hgz\" (UniqueName: \"kubernetes.io/projected/72a452f5-5d0f-4728-88b4-0d6bdeaf3149-kube-api-access-g6hgz\") pod \"multus-admission-controller-857f4d67dd-5j9w4\" (UID: \"72a452f5-5d0f-4728-88b4-0d6bdeaf3149\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.960869 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.972060 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-jczw6"] Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.986862 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.987024 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.487002646 +0000 UTC m=+143.147292275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: I1111 13:32:11.987122 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:11 crc kubenswrapper[4842]: E1111 13:32:11.987479 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.487472579 +0000 UTC m=+143.147762198 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:11 crc kubenswrapper[4842]: W1111 13:32:11.988144 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03795f69_f5f2_4f7f_898f_1c87fd4cf567.slice/crio-5b77ac0bd7483d7b8281671d22eba3106ba65db94287769030f47d5e8c714b6e WatchSource:0}: Error finding container 5b77ac0bd7483d7b8281671d22eba3106ba65db94287769030f47d5e8c714b6e: Status 404 returned error can't find the container with id 5b77ac0bd7483d7b8281671d22eba3106ba65db94287769030f47d5e8c714b6e Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.017164 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.087896 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.088240 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.588226842 +0000 UTC m=+143.248516461 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.098750 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.116000 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.132302 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.189859 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.190169 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.690158562 +0000 UTC m=+143.350448181 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.215530 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.222759 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.282609 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.290343 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.290453 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.790431939 +0000 UTC m=+143.450721568 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.290595 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.291972 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.791963157 +0000 UTC m=+143.452252766 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.326837 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq"] Nov 11 13:32:12 crc kubenswrapper[4842]: W1111 13:32:12.364887 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod462ebd3e_c3eb_4e6a_bebd_ee457a956356.slice/crio-acf94f5a850ed39fd04552bb77c4660f81c81f3235da4406c8e46f894fb25de5 WatchSource:0}: Error finding container acf94f5a850ed39fd04552bb77c4660f81c81f3235da4406c8e46f894fb25de5: Status 404 returned error can't find the container with id acf94f5a850ed39fd04552bb77c4660f81c81f3235da4406c8e46f894fb25de5 Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.390911 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.391252 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.891233083 +0000 UTC m=+143.551522702 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.492348 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.492751 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:12.992739369 +0000 UTC m=+143.653028988 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.498077 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.593363 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.593713 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.093689098 +0000 UTC m=+143.753978717 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.593757 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.594181 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.094168453 +0000 UTC m=+143.754458072 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.697897 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.698264 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.198239629 +0000 UTC m=+143.858529248 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.698488 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.698994 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.198984681 +0000 UTC m=+143.859274300 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.801805 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jkk9p"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.805475 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.805649 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.305621257 +0000 UTC m=+143.965910876 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.805705 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.806083 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.306071921 +0000 UTC m=+143.966361600 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.806895 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wgl8"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.832190 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x"] Nov 11 13:32:12 crc kubenswrapper[4842]: W1111 13:32:12.862460 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b8785d2_6888_4f85_bdbb_d58f0964e489.slice/crio-40db8c9c89bab18f93b8c3669e0a8cae5ce34e63aa5dcd0812e2cc627c747bd7 WatchSource:0}: Error finding container 40db8c9c89bab18f93b8c3669e0a8cae5ce34e63aa5dcd0812e2cc627c747bd7: Status 404 returned error can't find the container with id 40db8c9c89bab18f93b8c3669e0a8cae5ce34e63aa5dcd0812e2cc627c747bd7 Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.868975 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5j9w4"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.881605 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h"] Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.885917 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q"] Nov 11 13:32:12 crc kubenswrapper[4842]: W1111 13:32:12.888542 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea6fb48c_8a20_46c1_a4d0_30ffb0040560.slice/crio-dc1db4cdadf0c796215afd033ca837a52ce514e7c6d269bb38f36a476d029e28 WatchSource:0}: Error finding container dc1db4cdadf0c796215afd033ca837a52ce514e7c6d269bb38f36a476d029e28: Status 404 returned error can't find the container with id dc1db4cdadf0c796215afd033ca837a52ce514e7c6d269bb38f36a476d029e28 Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.889425 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" event={"ID":"3b8785d2-6888-4f85-bdbb-d58f0964e489","Type":"ContainerStarted","Data":"40db8c9c89bab18f93b8c3669e0a8cae5ce34e63aa5dcd0812e2cc627c747bd7"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.890697 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-x55hn" event={"ID":"b9ef1fc5-8627-46f3-a164-e012c7dc4934","Type":"ContainerStarted","Data":"c99889d8d8b27439813809cf32fd30dcd2db292872a955ee6a2a507461308c55"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.892360 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" 
event={"ID":"3ace4a93-471f-4bc1-aded-b6aadf15f3c1","Type":"ContainerStarted","Data":"650f87432b204059b7a3826c3afd158551722a17861cc7cea5eef60b7f44e601"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.893373 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" event={"ID":"0c277b36-785e-4e8f-828e-17e36dac70be","Type":"ContainerStarted","Data":"cb6242f3a0bc81e1358f2cc8011f8a0fd968cd09592dd4ddbefa8f485db5a332"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.894540 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lwbtb" event={"ID":"cc0a9e05-e827-4489-97df-473c19eb2732","Type":"ContainerStarted","Data":"360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.895326 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" event={"ID":"873539c3-26e3-49ba-955d-274d0ad4c803","Type":"ContainerStarted","Data":"0b5f547e8b97863865e3496f6607a900a066c63a4207056452b1ccbed0a9692d"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.896486 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" event={"ID":"659b5e72-ebf6-4446-86f2-402e0ad99bd3","Type":"ContainerStarted","Data":"3289a573bbd464aa920c2e697e1ae7a09983ffbd6334e9790f78edcf9a53f633"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.897383 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" event={"ID":"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728","Type":"ContainerStarted","Data":"d6ded9ccf5bb58c0ba07c8fdc001827e53e2a68e1d1cc07ff5a3bd3fd626882b"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.898737 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" event={"ID":"371a8cf1-cc94-4f29-bc61-4a98b76f0c58","Type":"ContainerStarted","Data":"1fbdb18a580e2942105a6c47a0361e6382878930028c71b401e0aa08433dab93"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.899693 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" event={"ID":"b562af49-ec32-42e1-86ee-48b4d7d9e3e2","Type":"ContainerStarted","Data":"c533a3113c2494d70d7005b0801d0279a7d551f884630b486c8e8fd68ad73eea"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.900729 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" event={"ID":"4d3fb774-9244-48e1-8733-4f7e199e1c00","Type":"ContainerStarted","Data":"f3232e96bbb6cde826c8d43f62cf60b76b8beaa3d340281947b6afaae99fee37"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.901276 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-jczw6" event={"ID":"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb","Type":"ContainerStarted","Data":"d213b86fc56916c92b8e1ce27462dc7b7a86c082e43868957df7b9c7b2235785"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.901951 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" 
event={"ID":"462ebd3e-c3eb-4e6a-bebd-ee457a956356","Type":"ContainerStarted","Data":"acf94f5a850ed39fd04552bb77c4660f81c81f3235da4406c8e46f894fb25de5"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.902612 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" event={"ID":"4ef309b8-4aa2-411b-8620-af612f546585","Type":"ContainerStarted","Data":"231c736824c2037cb25f4dcb8be2ce009e42c3e02e29626fd1f7912b5a2cbd6d"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.903739 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" event={"ID":"f16d8887-9c10-4144-a30d-f09a1feea711","Type":"ContainerStarted","Data":"0af9e0bc539c06202442426a37335b586225423b78e0e0a4ac2e0297fe2e8841"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.904602 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" event={"ID":"03795f69-f5f2-4f7f-898f-1c87fd4cf567","Type":"ContainerStarted","Data":"5b77ac0bd7483d7b8281671d22eba3106ba65db94287769030f47d5e8c714b6e"} Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.907098 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:12 crc kubenswrapper[4842]: E1111 13:32:12.907410 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.407365971 +0000 UTC m=+144.067655590 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.930928 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-6jh72"] Nov 11 13:32:12 crc kubenswrapper[4842]: W1111 13:32:12.934046 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37480fb1_03b9_4913_a336_c18363b1e85e.slice/crio-96617cd32ea141498a62f5f2a9c71e998406aa89f57e1d4387405c7c4e790b37 WatchSource:0}: Error finding container 96617cd32ea141498a62f5f2a9c71e998406aa89f57e1d4387405c7c4e790b37: Status 404 returned error can't find the container with id 96617cd32ea141498a62f5f2a9c71e998406aa89f57e1d4387405c7c4e790b37 Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.943790 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jwb8g"] Nov 11 13:32:12 crc kubenswrapper[4842]: W1111 13:32:12.947280 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72a452f5_5d0f_4728_88b4_0d6bdeaf3149.slice/crio-281f1c9b300cfdc29c02c853fde6872b4367c310cc9987e681f6056d7fb29ce1 WatchSource:0}: Error finding container 281f1c9b300cfdc29c02c853fde6872b4367c310cc9987e681f6056d7fb29ce1: Status 404 returned error can't find the container with id 281f1c9b300cfdc29c02c853fde6872b4367c310cc9987e681f6056d7fb29ce1 Nov 11 13:32:12 crc kubenswrapper[4842]: I1111 13:32:12.952787 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-svrwd"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.009851 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.010266 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.510252899 +0000 UTC m=+144.170542518 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.042820 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda935336a_9188_4379_abeb_df3b0387281e.slice/crio-7fc08437bf683d14ac4e3fa92ef41d6418cf510abd05e8b70a15df7c729dc755 WatchSource:0}: Error finding container 7fc08437bf683d14ac4e3fa92ef41d6418cf510abd05e8b70a15df7c729dc755: Status 404 returned error can't find the container with id 7fc08437bf683d14ac4e3fa92ef41d6418cf510abd05e8b70a15df7c729dc755 Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.122655 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.122929 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.622856751 +0000 UTC m=+144.283146380 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.123069 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.123428 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.623418728 +0000 UTC m=+144.283708347 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.166283 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod479b59cc_2cef_4728_a3c8_df498efbeb99.slice/crio-2e8b053dba38330124ece7e2e6dd50ce4ab611b99c8f881276d30a9902a86834 WatchSource:0}: Error finding container 2e8b053dba38330124ece7e2e6dd50ce4ab611b99c8f881276d30a9902a86834: Status 404 returned error can't find the container with id 2e8b053dba38330124ece7e2e6dd50ce4ab611b99c8f881276d30a9902a86834 Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.223982 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.224352 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.724333336 +0000 UTC m=+144.384622955 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.253857 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0401a69_94e4_473f_8f8a_795a1eeb7e9a.slice/crio-456d7591961b56b1ccb55d9259d9f05a44239314a05671bab180f841c5e355b2 WatchSource:0}: Error finding container 456d7591961b56b1ccb55d9259d9f05a44239314a05671bab180f841c5e355b2: Status 404 returned error can't find the container with id 456d7591961b56b1ccb55d9259d9f05a44239314a05671bab180f841c5e355b2 Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.266027 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod033d2864_f866_455b_ab6d_9345c366ed86.slice/crio-da18bd657716cd9dae5e5357673dd5624f3b413d5cde391a55bdc09f807a2bcd WatchSource:0}: Error finding container da18bd657716cd9dae5e5357673dd5624f3b413d5cde391a55bdc09f807a2bcd: Status 404 returned error can't find the container with id da18bd657716cd9dae5e5357673dd5624f3b413d5cde391a55bdc09f807a2bcd Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.326253 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.327521 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.827503183 +0000 UTC m=+144.487792802 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.332596 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9k5gp"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.363460 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.427698 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.430008 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.929987639 +0000 UTC m=+144.590277268 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.432374 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.433076 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.435590 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:13.935573813 +0000 UTC m=+144.595863432 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.451994 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12760c03_9304_4e86_8ff9_d297eda56122.slice/crio-8bedc11dbc76614ad25c8db373edf0d1488a950ecce82dc1888c143667c7ee0d WatchSource:0}: Error finding container 8bedc11dbc76614ad25c8db373edf0d1488a950ecce82dc1888c143667c7ee0d: Status 404 returned error can't find the container with id 8bedc11dbc76614ad25c8db373edf0d1488a950ecce82dc1888c143667c7ee0d Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.496554 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-wqftc" podStartSLOduration=123.496534588 podStartE2EDuration="2m3.496534588s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:13.492933907 +0000 UTC m=+144.153223526" watchObservedRunningTime="2025-11-11 13:32:13.496534588 +0000 UTC m=+144.156824207" Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.500098 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.524947 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.538994 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.540641 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.040613299 +0000 UTC m=+144.700902918 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.541292 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.541567 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.541950 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.04193522 +0000 UTC m=+144.702224849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.562451 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-x27gc" podStartSLOduration=124.562431448 podStartE2EDuration="2m4.562431448s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:13.560733154 +0000 UTC m=+144.221022773" watchObservedRunningTime="2025-11-11 13:32:13.562431448 +0000 UTC m=+144.222721067" Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.608091 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-22v98"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.642156 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.642301 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.14227729 +0000 UTC m=+144.802566899 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.642522 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.642835 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.142828367 +0000 UTC m=+144.803117976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.698029 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.702637 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.706757 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.722269 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg"] Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.743214 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.743405 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.243390143 +0000 UTC m=+144.903679762 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.743542 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.743881 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.243865028 +0000 UTC m=+144.904154647 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.827182 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0c4d8fb_fcbb_43fb_94ad_0cfe4d560fd6.slice/crio-c1045f60d341d15540496fc13b56d4144db8209e4bf576baaf49fdebfb837bb4 WatchSource:0}: Error finding container c1045f60d341d15540496fc13b56d4144db8209e4bf576baaf49fdebfb837bb4: Status 404 returned error can't find the container with id c1045f60d341d15540496fc13b56d4144db8209e4bf576baaf49fdebfb837bb4 Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.829886 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5df99cba_50a8_40bf_be6d_93b43eccd4ea.slice/crio-7265290ccba9c8704c96c84c7ab71be061d7e76ce95e5a6c329a94e4c8aaa483 WatchSource:0}: Error finding container 7265290ccba9c8704c96c84c7ab71be061d7e76ce95e5a6c329a94e4c8aaa483: Status 404 returned error can't find the container with id 7265290ccba9c8704c96c84c7ab71be061d7e76ce95e5a6c329a94e4c8aaa483 Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.834945 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5bef6af_57dc_4347_91dc_b57034ef5007.slice/crio-d787e57f6498782a3e02eb47466056cf076267d30f3a26caa8866ca753620a67 WatchSource:0}: Error finding container d787e57f6498782a3e02eb47466056cf076267d30f3a26caa8866ca753620a67: Status 404 returned error can't find the container with id d787e57f6498782a3e02eb47466056cf076267d30f3a26caa8866ca753620a67 Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.844338 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.844994 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.344976872 +0000 UTC m=+145.005266491 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.866287 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffddac0e_5b23_4ced_93a0_6045f2d8a12f.slice/crio-444c5e18d333db54a3eb32e6c3d821b7f3f8d7d50847cc5f38497aa8c25ab9e4 WatchSource:0}: Error finding container 444c5e18d333db54a3eb32e6c3d821b7f3f8d7d50847cc5f38497aa8c25ab9e4: Status 404 returned error can't find the container with id 444c5e18d333db54a3eb32e6c3d821b7f3f8d7d50847cc5f38497aa8c25ab9e4 Nov 11 13:32:13 crc kubenswrapper[4842]: W1111 13:32:13.867712 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f5f7d70_34ae_4eff_a77c_5f5736b80ea6.slice/crio-72b4e4ead7e786f0080165875c9e3004c7b98c31c4cb6d5c1463b2a1369d0c9b WatchSource:0}: Error finding container 72b4e4ead7e786f0080165875c9e3004c7b98c31c4cb6d5c1463b2a1369d0c9b: Status 404 returned error can't find the container with id 72b4e4ead7e786f0080165875c9e3004c7b98c31c4cb6d5c1463b2a1369d0c9b Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.921347 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" event={"ID":"22efe903-5517-47b3-9889-9ea704b2c39f","Type":"ContainerStarted","Data":"e72713beee172056803d538919ddd11389a3b1959e5a7ca32fcdf36a54c75e8a"} Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.946969 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:13 crc kubenswrapper[4842]: E1111 13:32:13.947280 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.447269442 +0000 UTC m=+145.107559061 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.949068 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" event={"ID":"f9740f8b-8b9e-4ffa-a716-20d83abf1362","Type":"ContainerStarted","Data":"7510a586e5a97e258a758d4ce2e1d3f317eaef4806bea34f79dc7dd616292400"} Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.957249 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" event={"ID":"1f873d1b-d95b-4b8e-9c04-05821213f2cd","Type":"ContainerStarted","Data":"cfe9ff12d3277a80d34536f75553b81ffed95c1cdbf57057a59d589afb0aa95c"} Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.971058 4842 generic.go:334] "Generic (PLEG): container finished" podID="1d6301f1-a0a6-47f7-8fe1-7fa00daa867c" containerID="74bdc09f23408db75be18bb181ac8adc52e275af102c11de7f5c7100c08c9106" exitCode=0 Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.971454 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" event={"ID":"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c","Type":"ContainerDied","Data":"74bdc09f23408db75be18bb181ac8adc52e275af102c11de7f5c7100c08c9106"} Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.982011 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-jczw6" event={"ID":"ef8bc024-9a73-46be-9d0a-853e3ca0b0cb","Type":"ContainerStarted","Data":"a4ab01f15a41e9be0fabab886cdbffed931fdec97ff7ee8a936ac3bb1ed85037"} Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.982350 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.988172 4842 patch_prober.go:28] interesting pod/console-operator-58897d9998-jczw6 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.988238 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-jczw6" podUID="ef8bc024-9a73-46be-9d0a-853e3ca0b0cb" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.989745 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" event={"ID":"3ace4a93-471f-4bc1-aded-b6aadf15f3c1","Type":"ContainerStarted","Data":"e60a58faeffd4b509b009c0f5f3ec9f2fb64acd5c132f56a0a9d08b46cd6f172"} Nov 11 13:32:13 crc kubenswrapper[4842]: I1111 13:32:13.997678 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" 
event={"ID":"f24d13cc-287a-4029-8151-a8a07dccd223","Type":"ContainerStarted","Data":"ee0b164faad3bfd91f4115776b94e96689b64fccbb2baa47da56ca72fd648634"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.018051 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-svrwd" event={"ID":"033d2864-f866-455b-ab6d-9345c366ed86","Type":"ContainerStarted","Data":"da18bd657716cd9dae5e5357673dd5624f3b413d5cde391a55bdc09f807a2bcd"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.033783 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" event={"ID":"462ebd3e-c3eb-4e6a-bebd-ee457a956356","Type":"ContainerStarted","Data":"d952ec7bc8f6af23d2c6bf1dfffbaddca3e77f6f42d49045d7f3e4a7e8d8996a"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.034629 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.037603 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" event={"ID":"a935336a-9188-4379-abeb-df3b0387281e","Type":"ContainerStarted","Data":"7fc08437bf683d14ac4e3fa92ef41d6418cf510abd05e8b70a15df7c729dc755"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.045522 4842 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-qr7hq container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.045589 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" podUID="462ebd3e-c3eb-4e6a-bebd-ee457a956356" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.049053 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-jczw6" podStartSLOduration=124.049025266 podStartE2EDuration="2m4.049025266s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.045781455 +0000 UTC m=+144.706071084" watchObservedRunningTime="2025-11-11 13:32:14.049025266 +0000 UTC m=+144.709314885" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.050691 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.051187 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.551170853 +0000 UTC m=+145.211460472 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.110610 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" event={"ID":"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728","Type":"ContainerStarted","Data":"f4f1358a6eb136faf7a8d05f44bea3d801da07999745bdedd711058b14736b3a"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.111131 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" event={"ID":"72a452f5-5d0f-4728-88b4-0d6bdeaf3149","Type":"ContainerStarted","Data":"281f1c9b300cfdc29c02c853fde6872b4367c310cc9987e681f6056d7fb29ce1"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.111169 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9x6tf" event={"ID":"12b41511-3266-433f-9580-b102a55a087b","Type":"ContainerStarted","Data":"1ab531f4a0f388f98f06adf6801df1ca6e5de80eacedc7004dec8600a07b8d06"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.127636 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" event={"ID":"5df99cba-50a8-40bf-be6d-93b43eccd4ea","Type":"ContainerStarted","Data":"7265290ccba9c8704c96c84c7ab71be061d7e76ce95e5a6c329a94e4c8aaa483"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.132564 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" event={"ID":"bf00a84f-494a-4ab6-8336-d08e94ddfe70","Type":"ContainerStarted","Data":"ab6b29e1f8d3a69f850e997473c56fdf9fb568f127d975390ca29ce460183cf7"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.138570 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" event={"ID":"873539c3-26e3-49ba-955d-274d0ad4c803","Type":"ContainerStarted","Data":"c32f7e691b9dc07aa699cd7e5a5e61557c99e6151b1cf68b0876189f8f7b978c"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.152496 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.153451 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" podStartSLOduration=124.153436583 podStartE2EDuration="2m4.153436583s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.068494521 +0000 UTC m=+144.728784170" watchObservedRunningTime="2025-11-11 13:32:14.153436583 +0000 UTC 
m=+144.813726202" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.154329 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-slt62" podStartSLOduration=125.154316749 podStartE2EDuration="2m5.154316749s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.153942129 +0000 UTC m=+144.814231748" watchObservedRunningTime="2025-11-11 13:32:14.154316749 +0000 UTC m=+144.814606368" Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.155453 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.655434875 +0000 UTC m=+145.315724544 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.155470 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" event={"ID":"3b8785d2-6888-4f85-bdbb-d58f0964e489","Type":"ContainerStarted","Data":"a3353bbb7e5e5c1b7ca4207520f4bd2ca1e524568b9303706442024d3b1ad383"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.163295 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" event={"ID":"4ef309b8-4aa2-411b-8620-af612f546585","Type":"ContainerStarted","Data":"8e93f82a2caa8d96c7aa1c4f5614b2e90490e227a048a58b4076fc3877d38cf6"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.166054 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" event={"ID":"659b5e72-ebf6-4446-86f2-402e0ad99bd3","Type":"ContainerStarted","Data":"ecf61129ac53b7f15ed0a49462bb078f93043870e031bbf245d0d281b239347a"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.177156 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zgv6x" podStartSLOduration=124.177130669 podStartE2EDuration="2m4.177130669s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.174235279 +0000 UTC m=+144.834524898" watchObservedRunningTime="2025-11-11 13:32:14.177130669 +0000 UTC m=+144.837420288" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.202014 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-tb95g" podStartSLOduration=124.201996372 podStartE2EDuration="2m4.201996372s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.201677353 +0000 UTC m=+144.861966962" watchObservedRunningTime="2025-11-11 13:32:14.201996372 +0000 UTC m=+144.862285991" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.208496 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" event={"ID":"793581cd-1066-4152-8a30-4004fa059137","Type":"ContainerStarted","Data":"1cc3f0c8ccd50f71cc200a53c0ff2d841501279cb426a8474e4508dec68c9aa3"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.213687 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-6jh72" event={"ID":"b0401a69-94e4-473f-8f8a-795a1eeb7e9a","Type":"ContainerStarted","Data":"456d7591961b56b1ccb55d9259d9f05a44239314a05671bab180f841c5e355b2"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.217372 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" event={"ID":"479b59cc-2cef-4728-a3c8-df498efbeb99","Type":"ContainerStarted","Data":"2e8b053dba38330124ece7e2e6dd50ce4ab611b99c8f881276d30a9902a86834"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.218801 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" event={"ID":"12760c03-9304-4e86-8ff9-d297eda56122","Type":"ContainerStarted","Data":"8bedc11dbc76614ad25c8db373edf0d1488a950ecce82dc1888c143667c7ee0d"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.225723 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" event={"ID":"e4ac96a0-0de8-47d1-b101-4054af9c7fe0","Type":"ContainerStarted","Data":"812c86d33b32f0f2c47e7987c0ccdc8c5ea6a27872738790c7320c3b46ffd77b"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.227235 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-qt9tx" podStartSLOduration=124.227217546 podStartE2EDuration="2m4.227217546s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.224164641 +0000 UTC m=+144.884454260" watchObservedRunningTime="2025-11-11 13:32:14.227217546 +0000 UTC m=+144.887507165" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.233963 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tncv8" event={"ID":"cf04d2ad-3dd4-418c-b9ea-9b749105b467","Type":"ContainerStarted","Data":"67a59c98e9958665514020f45f8826e69cdffe0b8e472109f17539dfbb9a1999"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.234899 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-tncv8" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.241852 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" event={"ID":"37480fb1-03b9-4913-a336-c18363b1e85e","Type":"ContainerStarted","Data":"96617cd32ea141498a62f5f2a9c71e998406aa89f57e1d4387405c7c4e790b37"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.242055 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.244742 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" event={"ID":"d5bef6af-57dc-4347-91dc-b57034ef5007","Type":"ContainerStarted","Data":"d787e57f6498782a3e02eb47466056cf076267d30f3a26caa8866ca753620a67"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.250187 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" event={"ID":"f34c36d1-6785-42b0-8b27-4ba2e00c4db1","Type":"ContainerStarted","Data":"a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.250847 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.251140 4842 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2wgl8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.251192 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.253415 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.253709 4842 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-w2kq7 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.253741 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.253794 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.753608987 +0000 UTC m=+145.413898596 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.254492 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.754482004 +0000 UTC m=+145.414771623 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.254959 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.261923 4842 patch_prober.go:28] interesting pod/downloads-7954f5f757-tncv8 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.261959 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" event={"ID":"ea6fb48c-8a20-46c1-a4d0-30ffb0040560","Type":"ContainerStarted","Data":"dc1db4cdadf0c796215afd033ca837a52ce514e7c6d269bb38f36a476d029e28"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.261985 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tncv8" podUID="cf04d2ad-3dd4-418c-b9ea-9b749105b467" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.263619 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-tncv8" podStartSLOduration=124.263606517 podStartE2EDuration="2m4.263606517s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.246937509 +0000 UTC m=+144.907227128" watchObservedRunningTime="2025-11-11 13:32:14.263606517 +0000 UTC m=+144.923896136" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.264391 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" 
event={"ID":"ffddac0e-5b23-4ced-93a0-6045f2d8a12f","Type":"ContainerStarted","Data":"444c5e18d333db54a3eb32e6c3d821b7f3f8d7d50847cc5f38497aa8c25ab9e4"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.264634 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" podStartSLOduration=124.264627519 podStartE2EDuration="2m4.264627519s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.263218386 +0000 UTC m=+144.923508005" watchObservedRunningTime="2025-11-11 13:32:14.264627519 +0000 UTC m=+144.924917148" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.268144 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" event={"ID":"2f5f7d70-34ae-4eff-a77c-5f5736b80ea6","Type":"ContainerStarted","Data":"72b4e4ead7e786f0080165875c9e3004c7b98c31c4cb6d5c1463b2a1369d0c9b"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.277714 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" event={"ID":"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6","Type":"ContainerStarted","Data":"c1045f60d341d15540496fc13b56d4144db8209e4bf576baaf49fdebfb837bb4"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.281878 4842 generic.go:334] "Generic (PLEG): container finished" podID="03795f69-f5f2-4f7f-898f-1c87fd4cf567" containerID="869552d9b26ebcf87012a8f92ac4fdf8ea7c7af0019483b253a9d54b9e663e2e" exitCode=0 Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.281971 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" event={"ID":"03795f69-f5f2-4f7f-898f-1c87fd4cf567","Type":"ContainerDied","Data":"869552d9b26ebcf87012a8f92ac4fdf8ea7c7af0019483b253a9d54b9e663e2e"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.283419 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" podStartSLOduration=124.283402623 podStartE2EDuration="2m4.283402623s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.280138531 +0000 UTC m=+144.940428170" watchObservedRunningTime="2025-11-11 13:32:14.283402623 +0000 UTC m=+144.943692242" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.285231 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-x55hn" event={"ID":"b9ef1fc5-8627-46f3-a164-e012c7dc4934","Type":"ContainerStarted","Data":"4c845f71db6f57c6637d0e6005342ff8da7254e1276647c84a081b94179f875e"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.293700 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" event={"ID":"0c277b36-785e-4e8f-828e-17e36dac70be","Type":"ContainerStarted","Data":"e24e288b07e68bf141468281c0df4cc255dee391be741a986811c75044ab9afa"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.317199 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" 
event={"ID":"a25deeed-3854-4f02-aa77-b7e616f2f2b8","Type":"ContainerStarted","Data":"b79ccc8cb475cb260ac4a4b995989ee6e96b70f4ae3e0719d0c991efe158d7fa"} Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.317317 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.320202 4842 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-gc82n container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body= Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.320244 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" podUID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.353173 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-x55hn" podStartSLOduration=6.353149101 podStartE2EDuration="6.353149101s" podCreationTimestamp="2025-11-11 13:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.350014705 +0000 UTC m=+145.010304334" watchObservedRunningTime="2025-11-11 13:32:14.353149101 +0000 UTC m=+145.013438720" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.355609 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-97v95" podStartSLOduration=124.355596748 podStartE2EDuration="2m4.355596748s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.332297224 +0000 UTC m=+144.992586843" watchObservedRunningTime="2025-11-11 13:32:14.355596748 +0000 UTC m=+145.015886367" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.356638 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.358129 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.857177847 +0000 UTC m=+145.517467486 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.358272 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.359700 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.859687165 +0000 UTC m=+145.519976824 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.369263 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-lwbtb" podStartSLOduration=124.369244513 podStartE2EDuration="2m4.369244513s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.367616581 +0000 UTC m=+145.027906190" watchObservedRunningTime="2025-11-11 13:32:14.369244513 +0000 UTC m=+145.029534132" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.393316 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" podStartSLOduration=125.39329542 podStartE2EDuration="2m5.39329542s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:14.38945255 +0000 UTC m=+145.049742169" watchObservedRunningTime="2025-11-11 13:32:14.39329542 +0000 UTC m=+145.053585039" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.459874 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.460643 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" 
failed. No retries permitted until 2025-11-11 13:32:14.960586572 +0000 UTC m=+145.620876201 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.460858 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.461493 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:14.96147494 +0000 UTC m=+145.621764559 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.565421 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.565719 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.065665489 +0000 UTC m=+145.725955098 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.666880 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.667296 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.167277908 +0000 UTC m=+145.827567527 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.769300 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.769482 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.269449955 +0000 UTC m=+145.929739574 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.769767 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.770089 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.270079474 +0000 UTC m=+145.930369093 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.872280 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.872613 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.372598482 +0000 UTC m=+146.032888101 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.961842 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.962295 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:32:14 crc kubenswrapper[4842]: I1111 13:32:14.973753 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:14 crc kubenswrapper[4842]: E1111 13:32:14.974049 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.474037466 +0000 UTC m=+146.134327085 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.077999 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.078374 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.578357259 +0000 UTC m=+146.238646878 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.180925 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.181660 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.68164488 +0000 UTC m=+146.341934499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.282512 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.282942 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.782927949 +0000 UTC m=+146.443217568 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.323634 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" event={"ID":"e4ac96a0-0de8-47d1-b101-4054af9c7fe0","Type":"ContainerStarted","Data":"38f7ba7d46ba3ad2d5d718fb89f4550f6736389b01261818f626de07f941a962"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.326808 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" event={"ID":"a935336a-9188-4379-abeb-df3b0387281e","Type":"ContainerStarted","Data":"abdd642e1bc47dae99d3d2b43f7d9a79518e77bc388f957235c88f91e1020284"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.326845 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" event={"ID":"a935336a-9188-4379-abeb-df3b0387281e","Type":"ContainerStarted","Data":"56bd8ef3778bf9990184765a0181a3eeb35962eb21673dc681e7097555fcba50"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.328633 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-6jh72" event={"ID":"b0401a69-94e4-473f-8f8a-795a1eeb7e9a","Type":"ContainerStarted","Data":"ce5071a7b8c2773ca74db7e849bca5ae09e1b95b00338835cd6cb381f59a04cb"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.331115 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" event={"ID":"c0c4d8fb-fcbb-43fb-94ad-0cfe4d560fd6","Type":"ContainerStarted","Data":"ee06a7aced44667fc8f50880585ca10769a1386a3cfcfcb2cc01053c14156474"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.333240 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" event={"ID":"371a8cf1-cc94-4f29-bc61-4a98b76f0c58","Type":"ContainerStarted","Data":"87809f4ad574584e5989e455ff991427004b3add51f42d07f29fa6fffcff757d"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.335638 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-svrwd" event={"ID":"033d2864-f866-455b-ab6d-9345c366ed86","Type":"ContainerStarted","Data":"62a1f86db91363ee18d6f2e6f2ade1b0e05de5eda5f18659d4013f73cc6a1a53"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.335697 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-svrwd" event={"ID":"033d2864-f866-455b-ab6d-9345c366ed86","Type":"ContainerStarted","Data":"c407cf3172e07218147ce8f131aa2425366de22771118b10b587c7d7bf08f95f"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.335767 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.339160 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" 
event={"ID":"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c","Type":"ContainerStarted","Data":"1db9a3ce96ad076e3f35c211081fc9dbf42f153a0fdba5202727ca96ab94103f"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.339659 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6tqv7" podStartSLOduration=125.339648433 podStartE2EDuration="2m5.339648433s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.339289812 +0000 UTC m=+145.999579431" watchObservedRunningTime="2025-11-11 13:32:15.339648433 +0000 UTC m=+145.999938052" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.345588 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" event={"ID":"22efe903-5517-47b3-9889-9ea704b2c39f","Type":"ContainerStarted","Data":"86ef54a6a2457784e1a2590d11d76797bde2285ecbab5b866e4d34da129a12d9"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.345651 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" event={"ID":"22efe903-5517-47b3-9889-9ea704b2c39f","Type":"ContainerStarted","Data":"c0901bef01d2568ef83fa8e9a07df5d72f1600d4587b6320149bb9d44ed3a930"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.349273 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" event={"ID":"72a452f5-5d0f-4728-88b4-0d6bdeaf3149","Type":"ContainerStarted","Data":"ae4c46c85c8f57c96aa345cff5546e70876d1077872d40a88729332c94f464f1"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.349327 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" event={"ID":"72a452f5-5d0f-4728-88b4-0d6bdeaf3149","Type":"ContainerStarted","Data":"cfbe67c2bf5ea0abb55a458cf78f71c8989040ed7a49d307fd63bdbb257ba62c"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.363819 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-svrwd" podStartSLOduration=7.363797104 podStartE2EDuration="7.363797104s" podCreationTimestamp="2025-11-11 13:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.361350838 +0000 UTC m=+146.021640457" watchObservedRunningTime="2025-11-11 13:32:15.363797104 +0000 UTC m=+146.024086723" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.366387 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" event={"ID":"f9740f8b-8b9e-4ffa-a716-20d83abf1362","Type":"ContainerStarted","Data":"515d37d870b2ad607a9e9af351d7882a6b41b60bd16207a4158084f7c9582f6a"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.366434 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" event={"ID":"f9740f8b-8b9e-4ffa-a716-20d83abf1362","Type":"ContainerStarted","Data":"439554d43eac7f4bba164cd28fabc307452092467dd9f563aef3ab2c1cfaf0fd"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.367752 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" 
event={"ID":"b562af49-ec32-42e1-86ee-48b4d7d9e3e2","Type":"ContainerStarted","Data":"487995fccfdf81271810a9cb99791a3e1e9da562d710cebd6c133a2497d423dd"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.371327 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" event={"ID":"7f1f3854-cd3e-4e04-bdcf-ea8bcec64728","Type":"ContainerStarted","Data":"6608dff42009bf21c5beeb487fc5024c05e45e433e506e78bd595dbe8c4009b5"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.371865 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.374929 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" event={"ID":"2f5f7d70-34ae-4eff-a77c-5f5736b80ea6","Type":"ContainerStarted","Data":"d02b8c607cd0622d4a87d3bf46e231b79cedfd99a3a8da2f2869de1f19537465"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.374962 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" event={"ID":"2f5f7d70-34ae-4eff-a77c-5f5736b80ea6","Type":"ContainerStarted","Data":"89203f21e3ccf5ade0ed0dd6064049c84180683168a36c0025806e185c0b17d8"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.384094 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.387186 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" event={"ID":"3ace4a93-471f-4bc1-aded-b6aadf15f3c1","Type":"ContainerStarted","Data":"d87011db70f63de3f5c617498c60305dbf7d93c9e572ee0b2bdd9a01aa2ca6f4"} Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.390309 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.890285508 +0000 UTC m=+146.550575127 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.391376 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-2vg2q" podStartSLOduration=125.391354661 podStartE2EDuration="2m5.391354661s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.385037755 +0000 UTC m=+146.045327374" watchObservedRunningTime="2025-11-11 13:32:15.391354661 +0000 UTC m=+146.051644280" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.403412 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" event={"ID":"37480fb1-03b9-4913-a336-c18363b1e85e","Type":"ContainerStarted","Data":"39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.404341 4842 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2wgl8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.404399 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.407647 4842 generic.go:334] "Generic (PLEG): container finished" podID="f24d13cc-287a-4029-8151-a8a07dccd223" containerID="ee0b164faad3bfd91f4115776b94e96689b64fccbb2baa47da56ca72fd648634" exitCode=0 Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.408285 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" event={"ID":"f24d13cc-287a-4029-8151-a8a07dccd223","Type":"ContainerDied","Data":"ee0b164faad3bfd91f4115776b94e96689b64fccbb2baa47da56ca72fd648634"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.408332 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.408346 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" event={"ID":"f24d13cc-287a-4029-8151-a8a07dccd223","Type":"ContainerStarted","Data":"91d5fc37a25bad25da13f012a6e6d70138770ee9774eac2b6c916aa572e680f4"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.414687 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" 
event={"ID":"d5bef6af-57dc-4347-91dc-b57034ef5007","Type":"ContainerStarted","Data":"cfe1c64c1c3e48fa078f1e9cb31cc6efeb8f44c8a9038ef642b6fd1ccef87288"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.422074 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" event={"ID":"ffddac0e-5b23-4ced-93a0-6045f2d8a12f","Type":"ContainerStarted","Data":"f6c9181ce5b0763899a5afe610ed998224aef911507d55e48e23b4fbc0e29152"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.423303 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-6jh72" podStartSLOduration=7.423279783 podStartE2EDuration="7.423279783s" podCreationTimestamp="2025-11-11 13:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.422349054 +0000 UTC m=+146.082638673" watchObservedRunningTime="2025-11-11 13:32:15.423279783 +0000 UTC m=+146.083569402" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.432298 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9x6tf" event={"ID":"12b41511-3266-433f-9580-b102a55a087b","Type":"ContainerStarted","Data":"39908401814a9f5d9b2186b76f0843c9a76d83c58d246a205ecd330dfec198d5"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.435728 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" event={"ID":"bf00a84f-494a-4ab6-8336-d08e94ddfe70","Type":"ContainerStarted","Data":"21c2bacf99626687a8177420ecea257de33a2049118d60e6286921e6063fcbd5"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.440125 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" event={"ID":"03795f69-f5f2-4f7f-898f-1c87fd4cf567","Type":"ContainerStarted","Data":"9c4c15d7d6d17170aa0231d2915d001d4b2d38296d0b9f02c1938221975c0ca3"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.443383 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" event={"ID":"479b59cc-2cef-4728-a3c8-df498efbeb99","Type":"ContainerStarted","Data":"d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.444350 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.446304 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" event={"ID":"4ef309b8-4aa2-411b-8620-af612f546585","Type":"ContainerStarted","Data":"c7981b19477e0ef36ff3b47b833ad6a873b19c6f68166e2ade8fa82f2d24078b"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.449020 4842 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-v984h container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.449065 4842 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" podUID="479b59cc-2cef-4728-a3c8-df498efbeb99" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.451025 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" event={"ID":"1f873d1b-d95b-4b8e-9c04-05821213f2cd","Type":"ContainerStarted","Data":"33e07272b6a621311e86142a986c5684fa3cb75c165e256d1e852303893eadd6"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.457368 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" event={"ID":"5df99cba-50a8-40bf-be6d-93b43eccd4ea","Type":"ContainerStarted","Data":"220e9dfc0031c8edfde6c24a3b530e8851bf7951dd150807d4509baf1cc11d11"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.457963 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lgql9" podStartSLOduration=126.457944091 podStartE2EDuration="2m6.457944091s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.452185542 +0000 UTC m=+146.112475161" watchObservedRunningTime="2025-11-11 13:32:15.457944091 +0000 UTC m=+146.118233710" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.458038 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.461262 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" event={"ID":"12760c03-9304-4e86-8ff9-d297eda56122","Type":"ContainerStarted","Data":"82cde4bcb7c96e34c89a8b40dd45e72e71c50675179bcb9af5080091efc92303"} Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.462154 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.462786 4842 patch_prober.go:28] interesting pod/downloads-7954f5f757-tncv8 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.462839 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tncv8" podUID="cf04d2ad-3dd4-418c-b9ea-9b749105b467" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.462921 4842 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hw59z container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:5443/healthz\": dial tcp 10.217.0.31:5443: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.462946 4842 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" podUID="5df99cba-50a8-40bf-be6d-93b43eccd4ea" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.31:5443/healthz\": dial tcp 10.217.0.31:5443: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.471883 4842 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-qr7hq container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.471965 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" podUID="462ebd3e-c3eb-4e6a-bebd-ee457a956356" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472059 4842 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-gc82n container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472076 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" podUID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472167 4842 patch_prober.go:28] interesting pod/console-operator-58897d9998-jczw6 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472188 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-jczw6" podUID="ef8bc024-9a73-46be-9d0a-853e3ca0b0cb" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472246 4842 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-w2kq7 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472264 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472322 4842 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mhlzl container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get 
\"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.472337 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" podUID="12760c03-9304-4e86-8ff9-d297eda56122" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.485909 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.489592 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:15.989569844 +0000 UTC m=+146.649859513 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.538980 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-85wk5" podStartSLOduration=125.53895607 podStartE2EDuration="2m5.53895607s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.489355188 +0000 UTC m=+146.149644807" watchObservedRunningTime="2025-11-11 13:32:15.53895607 +0000 UTC m=+146.199245689" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.539771 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-95x5l" podStartSLOduration=125.539764086 podStartE2EDuration="2m5.539764086s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.530930961 +0000 UTC m=+146.191220580" watchObservedRunningTime="2025-11-11 13:32:15.539764086 +0000 UTC m=+146.200053695" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.586413 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-56fvg" podStartSLOduration=125.586391985 podStartE2EDuration="2m5.586391985s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.558222919 +0000 UTC m=+146.218512538" watchObservedRunningTime="2025-11-11 
13:32:15.586391985 +0000 UTC m=+146.246681604" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.587121 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" podStartSLOduration=125.587114838 podStartE2EDuration="2m5.587114838s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.583578797 +0000 UTC m=+146.243868416" watchObservedRunningTime="2025-11-11 13:32:15.587114838 +0000 UTC m=+146.247404457" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.591756 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.592084 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.092073932 +0000 UTC m=+146.752363551 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.609134 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" podStartSLOduration=125.609094281 podStartE2EDuration="2m5.609094281s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.607304465 +0000 UTC m=+146.267594114" watchObservedRunningTime="2025-11-11 13:32:15.609094281 +0000 UTC m=+146.269383900" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.654793 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" podStartSLOduration=125.654670198 podStartE2EDuration="2m5.654670198s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.651155929 +0000 UTC m=+146.311445558" watchObservedRunningTime="2025-11-11 13:32:15.654670198 +0000 UTC m=+146.314959817" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.655120 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" podStartSLOduration=125.655115261 podStartE2EDuration="2m5.655115261s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-11 13:32:15.628262357 +0000 UTC m=+146.288551966" watchObservedRunningTime="2025-11-11 13:32:15.655115261 +0000 UTC m=+146.315404880" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.669360 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" podStartSLOduration=125.669345654 podStartE2EDuration="2m5.669345654s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.667422284 +0000 UTC m=+146.327711903" watchObservedRunningTime="2025-11-11 13:32:15.669345654 +0000 UTC m=+146.329635273" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.688256 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-zpgpg" podStartSLOduration=125.688236982 podStartE2EDuration="2m5.688236982s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.684949069 +0000 UTC m=+146.345238698" watchObservedRunningTime="2025-11-11 13:32:15.688236982 +0000 UTC m=+146.348526601" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.694296 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.694698 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.194678161 +0000 UTC m=+146.854967780 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.731478 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" podStartSLOduration=125.731457765 podStartE2EDuration="2m5.731457765s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.71362817 +0000 UTC m=+146.373917789" watchObservedRunningTime="2025-11-11 13:32:15.731457765 +0000 UTC m=+146.391747384" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.731666 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" podStartSLOduration=125.731661811 podStartE2EDuration="2m5.731661811s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.72776882 +0000 UTC m=+146.388058449" watchObservedRunningTime="2025-11-11 13:32:15.731661811 +0000 UTC m=+146.391951430" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.761616 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-5j9w4" podStartSLOduration=125.761596682 podStartE2EDuration="2m5.761596682s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.746173883 +0000 UTC m=+146.406463492" watchObservedRunningTime="2025-11-11 13:32:15.761596682 +0000 UTC m=+146.421886301" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.763624 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-9x6tf" podStartSLOduration=125.763616815 podStartE2EDuration="2m5.763616815s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.761040465 +0000 UTC m=+146.421330084" watchObservedRunningTime="2025-11-11 13:32:15.763616815 +0000 UTC m=+146.423906424" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.786492 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-jwb8g" podStartSLOduration=125.786474265 podStartE2EDuration="2m5.786474265s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.783524953 +0000 UTC m=+146.443814572" watchObservedRunningTime="2025-11-11 13:32:15.786474265 +0000 UTC m=+146.446763884" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.796441 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.796883 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.296869139 +0000 UTC m=+146.957158758 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.807500 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qr9r2" podStartSLOduration=125.807455328 podStartE2EDuration="2m5.807455328s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.806141237 +0000 UTC m=+146.466430856" watchObservedRunningTime="2025-11-11 13:32:15.807455328 +0000 UTC m=+146.467744947" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.839567 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-22v98" podStartSLOduration=125.839551115 podStartE2EDuration="2m5.839551115s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.838284317 +0000 UTC m=+146.498573936" watchObservedRunningTime="2025-11-11 13:32:15.839551115 +0000 UTC m=+146.499840734" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.864372 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ztl26" podStartSLOduration=125.864356767 podStartE2EDuration="2m5.864356767s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.862582562 +0000 UTC m=+146.522872191" watchObservedRunningTime="2025-11-11 13:32:15.864356767 +0000 UTC m=+146.524646386" Nov 11 13:32:15 crc kubenswrapper[4842]: I1111 13:32:15.898172 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:15 crc kubenswrapper[4842]: E1111 13:32:15.898499 4842 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.398482898 +0000 UTC m=+147.058772517 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.000061 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.000392 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.500380556 +0000 UTC m=+147.160670185 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.101051 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.101261 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.601232412 +0000 UTC m=+147.261522041 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.101437 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.101861 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.601824141 +0000 UTC m=+147.262113830 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.202778 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.203171 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.703136901 +0000 UTC m=+147.363426520 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.216917 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.218616 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.218686 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.304697 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.305170 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.805149052 +0000 UTC m=+147.465438741 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.405441 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.405637 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.905611525 +0000 UTC m=+147.565901144 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.406310 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.406779 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:16.906758511 +0000 UTC m=+147.567048200 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.467954 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" event={"ID":"1d6301f1-a0a6-47f7-8fe1-7fa00daa867c","Type":"ContainerStarted","Data":"9b88cec64a248873e68bc5052fcd25ee6745710a40bc489ec461cd5a99d1fcb4"} Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.473717 4842 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-w2kq7 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.473737 4842 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2wgl8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.473767 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.473791 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Nov 
11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.474019 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" event={"ID":"ea6fb48c-8a20-46c1-a4d0-30ffb0040560","Type":"ContainerStarted","Data":"cb65f30bb94a7f4f815dfe82df6d5b273b9d56c56fb656422bcf2dccdd28b800"} Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.474231 4842 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-v984h container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.474287 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" podUID="479b59cc-2cef-4728-a3c8-df498efbeb99" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.474313 4842 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mhlzl container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.474366 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" podUID="12760c03-9304-4e86-8ff9-d297eda56122" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.474424 4842 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hw59z container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:5443/healthz\": dial tcp 10.217.0.31:5443: connect: connection refused" start-of-body= Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.474460 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" podUID="5df99cba-50a8-40bf-be6d-93b43eccd4ea" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.31:5443/healthz\": dial tcp 10.217.0.31:5443: connect: connection refused" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.516690 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.519345 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.019312481 +0000 UTC m=+147.679602110 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.540228 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.540612 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.542826 4842 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-krvwl container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.542889 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" podUID="03795f69-f5f2-4f7f-898f-1c87fd4cf567" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.556899 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8fdf6" podStartSLOduration=126.556883228 podStartE2EDuration="2m6.556883228s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:15.89532362 +0000 UTC m=+146.555613239" watchObservedRunningTime="2025-11-11 13:32:16.556883228 +0000 UTC m=+147.217172847" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.558914 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" podStartSLOduration=127.558901072 podStartE2EDuration="2m7.558901072s" podCreationTimestamp="2025-11-11 13:30:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:16.555474065 +0000 UTC m=+147.215763704" watchObservedRunningTime="2025-11-11 13:32:16.558901072 +0000 UTC m=+147.219190691" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.608451 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-9k5gp" podStartSLOduration=126.608435622 podStartE2EDuration="2m6.608435622s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:16.595506399 +0000 UTC m=+147.255796018" watchObservedRunningTime="2025-11-11 13:32:16.608435622 +0000 UTC m=+147.268725241" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.620067 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.621129 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.121090115 +0000 UTC m=+147.781379734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.678189 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-qr7hq" Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.722240 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.722457 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.222427105 +0000 UTC m=+147.882716724 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.722742 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.723058 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.223045775 +0000 UTC m=+147.883335394 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.824406 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.824607 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.324583932 +0000 UTC m=+147.984873551 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.824747 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.825069 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.325057086 +0000 UTC m=+147.985346705 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:16 crc kubenswrapper[4842]: I1111 13:32:16.925768 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:16 crc kubenswrapper[4842]: E1111 13:32:16.926154 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.426138389 +0000 UTC m=+148.086428008 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.027190 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.027541 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.527525891 +0000 UTC m=+148.187815510 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.096475 4842 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-bs2dn container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.096557 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" podUID="f24d13cc-287a-4029-8151-a8a07dccd223" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.096841 4842 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-bs2dn container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.096858 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" podUID="f24d13cc-287a-4029-8151-a8a07dccd223" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.128709 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.129164 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.629147711 +0000 UTC m=+148.289437330 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.219704 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:17 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:17 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:17 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.220010 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.230498 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.230872 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.730858333 +0000 UTC m=+148.391147952 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.331398 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.331771 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.83175325 +0000 UTC m=+148.492042869 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.432807 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.433220 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:17.933202624 +0000 UTC m=+148.593492323 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.490695 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mhlzl" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.533949 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.534170 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.034138343 +0000 UTC m=+148.694427972 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.534584 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.534990 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.034978499 +0000 UTC m=+148.695268118 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.605892 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.635583 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.635811 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.135771113 +0000 UTC m=+148.796060732 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.635858 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.636286 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.136267758 +0000 UTC m=+148.796557377 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.737450 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.737612 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.237588248 +0000 UTC m=+148.897877867 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.737761 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.738254 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.238236758 +0000 UTC m=+148.898526377 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.838755 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.838938 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.338911128 +0000 UTC m=+148.999200747 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.839323 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.839371 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.839417 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.839700 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.339687373 +0000 UTC m=+148.999976992 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.840198 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.848754 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.940451 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.940633 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.940673 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:17 crc kubenswrapper[4842]: E1111 13:32:17.941207 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.441179978 +0000 UTC m=+149.101469597 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.947402 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:32:17 crc kubenswrapper[4842]: I1111 13:32:17.948019 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.041617 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.042018 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.542001983 +0000 UTC m=+149.202291602 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.085649 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.097984 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.146193 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.146684 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.646667657 +0000 UTC m=+149.306957286 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.171238 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.225558 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:18 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:18 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:18 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.225615 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.250548 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.250857 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.750845886 +0000 UTC m=+149.411135505 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.352210 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.352573 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.852554779 +0000 UTC m=+149.512844398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.454525 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.455225 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:18.95521256 +0000 UTC m=+149.615502179 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.527475 4842 generic.go:334] "Generic (PLEG): container finished" podID="b562af49-ec32-42e1-86ee-48b4d7d9e3e2" containerID="487995fccfdf81271810a9cb99791a3e1e9da562d710cebd6c133a2497d423dd" exitCode=0 Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.527579 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" event={"ID":"b562af49-ec32-42e1-86ee-48b4d7d9e3e2","Type":"ContainerDied","Data":"487995fccfdf81271810a9cb99791a3e1e9da562d710cebd6c133a2497d423dd"} Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.560041 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.560387 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.06036256 +0000 UTC m=+149.720652169 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.564447 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" event={"ID":"ea6fb48c-8a20-46c1-a4d0-30ffb0040560","Type":"ContainerStarted","Data":"b03d5102b83a9515a1e9b9d81b1aa9fcaa7a78baf4142e9e333ab66f181b673c"} Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.662928 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.663923 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.163907509 +0000 UTC m=+149.824197128 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.714290 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.715094 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.723468 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.723980 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.727247 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.774246 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.774692 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1353775-2921-4576-9b5b-8ca99d8195a7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.774720 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c1353775-2921-4576-9b5b-8ca99d8195a7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.774854 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.274838988 +0000 UTC m=+149.935128607 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.818414 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-254hd"] Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.819963 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.825546 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.855194 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-254hd"] Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.875436 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1353775-2921-4576-9b5b-8ca99d8195a7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.875469 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-catalog-content\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.875487 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c1353775-2921-4576-9b5b-8ca99d8195a7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.875507 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sl86\" (UniqueName: \"kubernetes.io/projected/830221cf-75b5-4942-aa0b-3cb82d9e0222-kube-api-access-7sl86\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.875524 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-utilities\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.875567 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.875857 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.375844838 +0000 UTC m=+150.036134457 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.876268 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c1353775-2921-4576-9b5b-8ca99d8195a7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.926959 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1353775-2921-4576-9b5b-8ca99d8195a7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.976653 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.976850 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sl86\" (UniqueName: \"kubernetes.io/projected/830221cf-75b5-4942-aa0b-3cb82d9e0222-kube-api-access-7sl86\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.976884 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-utilities\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.976971 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-catalog-content\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.977350 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-catalog-content\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: E1111 13:32:18.977416 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.477402546 +0000 UTC m=+150.137692165 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.977858 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-utilities\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.980013 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rznqq"] Nov 11 13:32:18 crc kubenswrapper[4842]: I1111 13:32:18.982537 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.011533 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.015873 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sl86\" (UniqueName: \"kubernetes.io/projected/830221cf-75b5-4942-aa0b-3cb82d9e0222-kube-api-access-7sl86\") pod \"certified-operators-254hd\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.017586 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rznqq"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.085773 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttskp\" (UniqueName: \"kubernetes.io/projected/dff8b002-18bd-499b-92d2-f739d29131e8-kube-api-access-ttskp\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.085812 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-utilities\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.085850 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.085889 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-catalog-content\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.086252 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.58623971 +0000 UTC m=+150.246529329 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.100356 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:19 crc kubenswrapper[4842]: W1111 13:32:19.148181 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-b2c27fab8dd252861405dfdf9658f8cd2a9f368868d26323ea404640f87a6fa0 WatchSource:0}: Error finding container b2c27fab8dd252861405dfdf9658f8cd2a9f368868d26323ea404640f87a6fa0: Status 404 returned error can't find the container with id b2c27fab8dd252861405dfdf9658f8cd2a9f368868d26323ea404640f87a6fa0 Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.179636 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gfkfk"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.181304 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.188414 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.188723 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttskp\" (UniqueName: \"kubernetes.io/projected/dff8b002-18bd-499b-92d2-f739d29131e8-kube-api-access-ttskp\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.188761 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-utilities\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.188825 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-catalog-content\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.189327 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-catalog-content\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.189693 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.689666085 +0000 UTC m=+150.349955714 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.189714 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-utilities\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.202385 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gfkfk"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.216501 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.227122 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:19 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:19 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:19 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.227426 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.235228 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttskp\" (UniqueName: \"kubernetes.io/projected/dff8b002-18bd-499b-92d2-f739d29131e8-kube-api-access-ttskp\") pod \"community-operators-rznqq\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.291948 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-catalog-content\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.292024 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-utilities\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.292046 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6qn7\" (UniqueName: \"kubernetes.io/projected/f27cec58-9f93-429d-b971-b00a25be5058-kube-api-access-t6qn7\") pod 
\"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.292078 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.292439 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.7924273 +0000 UTC m=+150.452716919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.364016 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mx9qp"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.364953 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.377780 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mx9qp"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.386388 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.395686 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.395901 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-utilities\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.395948 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6qn7\" (UniqueName: \"kubernetes.io/projected/f27cec58-9f93-429d-b971-b00a25be5058-kube-api-access-t6qn7\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.396027 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-catalog-content\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.397066 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.897048863 +0000 UTC m=+150.557338482 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.397088 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-catalog-content\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.397392 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-utilities\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.442408 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6qn7\" (UniqueName: \"kubernetes.io/projected/f27cec58-9f93-429d-b971-b00a25be5058-kube-api-access-t6qn7\") pod \"certified-operators-gfkfk\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.496840 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-utilities\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.496995 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.497174 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsz2z\" (UniqueName: \"kubernetes.io/projected/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-kube-api-access-hsz2z\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.497246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-catalog-content\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.497719 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: 
nodeName:}" failed. No retries permitted until 2025-11-11 13:32:19.997704083 +0000 UTC m=+150.657993702 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.541607 4842 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.602670 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.602951 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-catalog-content\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.603035 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-utilities\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.603123 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsz2z\" (UniqueName: \"kubernetes.io/projected/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-kube-api-access-hsz2z\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.603518 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.103499832 +0000 UTC m=+150.763789451 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.603954 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-catalog-content\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.604051 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-utilities\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.622413 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"923956ce20744285ddcee0fdc316f7eaac10ad9eaf9169f73f516e3e1b0016b5"} Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.622463 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"0addb1b248faaef3061433dca50e3f19d4a32fa60faa9324ca8f832923e5955d"} Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.634933 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.635339 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"d78a12a2c1abe7f9b51dbfc5cdf717602a94a4c17377cf10e4d94c1d0165cfa3"} Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.635397 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6a4663143bbefac5afbe32572ac947cd5c269cd00550ae61a0a42ebb2b54fc7a"} Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.638511 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsz2z\" (UniqueName: \"kubernetes.io/projected/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-kube-api-access-hsz2z\") pod \"community-operators-mx9qp\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.670542 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.679587 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"b2c27fab8dd252861405dfdf9658f8cd2a9f368868d26323ea404640f87a6fa0"} Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.680201 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.704300 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.705431 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.205420051 +0000 UTC m=+150.865709660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.745830 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.813182 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.815125 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.31508584 +0000 UTC m=+150.975375459 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.888725 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-254hd"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.909377 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rznqq"] Nov 11 13:32:19 crc kubenswrapper[4842]: I1111 13:32:19.920539 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:19 crc kubenswrapper[4842]: E1111 13:32:19.920925 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.420909141 +0000 UTC m=+151.081198770 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:19 crc kubenswrapper[4842]: W1111 13:32:19.977789 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddff8b002_18bd_499b_92d2_f739d29131e8.slice/crio-7a030e8726637406a9bf7ee8254fb8dc53d7991fc4f4412ac845fde00478af3e WatchSource:0}: Error finding container 7a030e8726637406a9bf7ee8254fb8dc53d7991fc4f4412ac845fde00478af3e: Status 404 returned error can't find the container with id 7a030e8726637406a9bf7ee8254fb8dc53d7991fc4f4412ac845fde00478af3e Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.024662 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:20 crc kubenswrapper[4842]: E1111 13:32:20.025359 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.525341427 +0000 UTC m=+151.185631046 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.135186 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:20 crc kubenswrapper[4842]: E1111 13:32:20.135579 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.635565535 +0000 UTC m=+151.295855154 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.231491 4842 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-11T13:32:19.541643549Z","Handler":null,"Name":""} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.236482 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:20 crc kubenswrapper[4842]: E1111 13:32:20.236989 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.736968828 +0000 UTC m=+151.397258447 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.237448 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:20 crc kubenswrapper[4842]: E1111 13:32:20.238380 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-11 13:32:20.738367251 +0000 UTC m=+151.398656870 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-d9vpf" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.275329 4842 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.276118 4842 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.291241 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bs2dn" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.306758 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:20 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:20 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:20 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.306836 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.342380 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.373558 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.444661 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.465919 4842 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.465990 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.549073 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-d9vpf\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.623877 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gfkfk"] Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.650547 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:20 crc kubenswrapper[4842]: W1111 13:32:20.685564 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf27cec58_9f93_429d_b971_b00a25be5058.slice/crio-7f47ddf83476d7dbf55874ee9c4af1a3c444698c47ee9960b684fa48a13b84d6 WatchSource:0}: Error finding container 7f47ddf83476d7dbf55874ee9c4af1a3c444698c47ee9960b684fa48a13b84d6: Status 404 returned error can't find the container with id 7f47ddf83476d7dbf55874ee9c4af1a3c444698c47ee9960b684fa48a13b84d6 Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.691532 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" event={"ID":"b562af49-ec32-42e1-86ee-48b4d7d9e3e2","Type":"ContainerDied","Data":"c533a3113c2494d70d7005b0801d0279a7d551f884630b486c8e8fd68ad73eea"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.691584 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c533a3113c2494d70d7005b0801d0279a7d551f884630b486c8e8fd68ad73eea" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.691541 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.695678 4842 generic.go:334] "Generic (PLEG): container finished" podID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerID="0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610" exitCode=0 Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.695781 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254hd" event={"ID":"830221cf-75b5-4942-aa0b-3cb82d9e0222","Type":"ContainerDied","Data":"0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.695829 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254hd" event={"ID":"830221cf-75b5-4942-aa0b-3cb82d9e0222","Type":"ContainerStarted","Data":"48bd50ed6491018dc06a96351eca8cbea088e04a84d0435a087700188b00420a"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.697798 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"11f17324d75f8a838fed2360931158ad8d197b4c7d5ba8d0e350b1527ee107b9"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.707471 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.708923 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c1353775-2921-4576-9b5b-8ca99d8195a7","Type":"ContainerStarted","Data":"1e351e32957b656317b4ebe834238625f4493df872df0d59481d8800d24289e5"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.708982 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c1353775-2921-4576-9b5b-8ca99d8195a7","Type":"ContainerStarted","Data":"dbf7391b69653cc3c1371de7bc1c68f2c0bc753142f7bc37910b764bd2ebc6b4"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.729068 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" event={"ID":"ea6fb48c-8a20-46c1-a4d0-30ffb0040560","Type":"ContainerStarted","Data":"20160b402f5a11806c6001def48fe86bbf951ff8740e6e3ba9e578de11fd791c"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.729137 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" event={"ID":"ea6fb48c-8a20-46c1-a4d0-30ffb0040560","Type":"ContainerStarted","Data":"c7343c0de75578344c0f2b9d5202a391cb03c6717b1caaf12c65ca2a19788e1d"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.730631 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.730611185 podStartE2EDuration="2.730611185s" podCreationTimestamp="2025-11-11 13:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:20.72882133 +0000 UTC m=+151.389110959" watchObservedRunningTime="2025-11-11 13:32:20.730611185 +0000 UTC m=+151.390900804" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.734841 4842 generic.go:334] "Generic (PLEG): container finished" 
podID="dff8b002-18bd-499b-92d2-f739d29131e8" containerID="0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c" exitCode=0 Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.734894 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rznqq" event={"ID":"dff8b002-18bd-499b-92d2-f739d29131e8","Type":"ContainerDied","Data":"0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.734924 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rznqq" event={"ID":"dff8b002-18bd-499b-92d2-f739d29131e8","Type":"ContainerStarted","Data":"7a030e8726637406a9bf7ee8254fb8dc53d7991fc4f4412ac845fde00478af3e"} Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.749624 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-secret-volume\") pod \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.749701 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-config-volume\") pod \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.749830 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckxwp\" (UniqueName: \"kubernetes.io/projected/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-kube-api-access-ckxwp\") pod \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\" (UID: \"b562af49-ec32-42e1-86ee-48b4d7d9e3e2\") " Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.753258 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-config-volume" (OuterVolumeSpecName: "config-volume") pod "b562af49-ec32-42e1-86ee-48b4d7d9e3e2" (UID: "b562af49-ec32-42e1-86ee-48b4d7d9e3e2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.757391 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-jkk9p" podStartSLOduration=12.757363027 podStartE2EDuration="12.757363027s" podCreationTimestamp="2025-11-11 13:32:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:20.754265161 +0000 UTC m=+151.414554780" watchObservedRunningTime="2025-11-11 13:32:20.757363027 +0000 UTC m=+151.417652646" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.760072 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-kube-api-access-ckxwp" (OuterVolumeSpecName: "kube-api-access-ckxwp") pod "b562af49-ec32-42e1-86ee-48b4d7d9e3e2" (UID: "b562af49-ec32-42e1-86ee-48b4d7d9e3e2"). InnerVolumeSpecName "kube-api-access-ckxwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.760551 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b562af49-ec32-42e1-86ee-48b4d7d9e3e2" (UID: "b562af49-ec32-42e1-86ee-48b4d7d9e3e2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.807587 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.852132 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.852166 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.852180 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckxwp\" (UniqueName: \"kubernetes.io/projected/b562af49-ec32-42e1-86ee-48b4d7d9e3e2-kube-api-access-ckxwp\") on node \"crc\" DevicePath \"\"" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.875618 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mx9qp"] Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.962654 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2479l"] Nov 11 13:32:20 crc kubenswrapper[4842]: E1111 13:32:20.962895 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b562af49-ec32-42e1-86ee-48b4d7d9e3e2" containerName="collect-profiles" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.962918 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b562af49-ec32-42e1-86ee-48b4d7d9e3e2" containerName="collect-profiles" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.963129 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b562af49-ec32-42e1-86ee-48b4d7d9e3e2" containerName="collect-profiles" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.964228 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.967312 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 11 13:32:20 crc kubenswrapper[4842]: I1111 13:32:20.972662 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2479l"] Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.045583 4842 patch_prober.go:28] interesting pod/downloads-7954f5f757-tncv8 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.045647 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tncv8" podUID="cf04d2ad-3dd4-418c-b9ea-9b749105b467" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.046234 4842 patch_prober.go:28] interesting pod/downloads-7954f5f757-tncv8 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.046257 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-tncv8" podUID="cf04d2ad-3dd4-418c-b9ea-9b749105b467" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.054794 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-catalog-content\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.054875 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-utilities\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.054954 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b99lm\" (UniqueName: \"kubernetes.io/projected/55880057-105b-4a56-a0db-9e9aaff70a4c-kube-api-access-b99lm\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.125588 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.125630 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.131269 4842 patch_prober.go:28] interesting 
pod/console-f9d7485db-lwbtb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.131347 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lwbtb" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.139061 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.156299 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-catalog-content\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.156418 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-utilities\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.156660 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b99lm\" (UniqueName: \"kubernetes.io/projected/55880057-105b-4a56-a0db-9e9aaff70a4c-kube-api-access-b99lm\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.157606 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-catalog-content\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.157884 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-utilities\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.182367 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-d9vpf"] Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.191470 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.204177 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b99lm\" (UniqueName: \"kubernetes.io/projected/55880057-105b-4a56-a0db-9e9aaff70a4c-kube-api-access-b99lm\") pod \"redhat-marketplace-2479l\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc 
kubenswrapper[4842]: I1111 13:32:21.229684 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:21 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:21 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:21 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.229747 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.317866 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.350634 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5m2t2"] Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.351873 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.353050 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.354639 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.365327 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5m2t2"] Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.381017 4842 patch_prober.go:28] interesting pod/apiserver-76f77b778f-ppkl7 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]log ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]etcd ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/generic-apiserver-start-informers ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/max-in-flight-filter ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 11 13:32:21 crc kubenswrapper[4842]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 11 13:32:21 crc kubenswrapper[4842]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/project.openshift.io-projectcache ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/openshift.io-startinformers ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 11 13:32:21 crc kubenswrapper[4842]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 11 13:32:21 crc 
kubenswrapper[4842]: livez check failed Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.381077 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" podUID="1d6301f1-a0a6-47f7-8fe1-7fa00daa867c" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.463074 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-utilities\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.463162 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bnkm\" (UniqueName: \"kubernetes.io/projected/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-kube-api-access-2bnkm\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.463180 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-catalog-content\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.544322 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2479l"] Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.553645 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.558659 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-krvwl" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.566402 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-utilities\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.566470 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bnkm\" (UniqueName: \"kubernetes.io/projected/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-kube-api-access-2bnkm\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.566499 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-catalog-content\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.566965 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-utilities\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.567061 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-catalog-content\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.570007 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-jczw6" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.592287 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bnkm\" (UniqueName: \"kubernetes.io/projected/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-kube-api-access-2bnkm\") pod \"redhat-marketplace-5m2t2\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.673447 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.702943 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.788715 4842 generic.go:334] "Generic (PLEG): container finished" podID="f27cec58-9f93-429d-b971-b00a25be5058" containerID="ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3" exitCode=0 Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.788794 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gfkfk" event={"ID":"f27cec58-9f93-429d-b971-b00a25be5058","Type":"ContainerDied","Data":"ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.788831 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gfkfk" event={"ID":"f27cec58-9f93-429d-b971-b00a25be5058","Type":"ContainerStarted","Data":"7f47ddf83476d7dbf55874ee9c4af1a3c444698c47ee9960b684fa48a13b84d6"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.828472 4842 generic.go:334] "Generic (PLEG): container finished" podID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerID="f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62" exitCode=0 Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.828606 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mx9qp" event={"ID":"6715a6fa-a4e7-4be7-a043-f71fa81d1b98","Type":"ContainerDied","Data":"f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.828635 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mx9qp" event={"ID":"6715a6fa-a4e7-4be7-a043-f71fa81d1b98","Type":"ContainerStarted","Data":"080a78ac2a6e963c05d8d6a66cfdf94a3425327ff804244380ce639f0b4105fc"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.870393 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" event={"ID":"102c5e64-9ad0-4dc2-a6cf-bac2683db16e","Type":"ContainerStarted","Data":"d7f7f059592e3c64a0f2e7943f5fc96b38ecfe93bbf13b1b2db9123b4dcd1f13"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.870451 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" event={"ID":"102c5e64-9ad0-4dc2-a6cf-bac2683db16e","Type":"ContainerStarted","Data":"d6c50388a069f5f0f7a553527dcde411d7c0820dab06a3b167e00fc64a845980"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.871339 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.896811 4842 generic.go:334] "Generic (PLEG): container finished" podID="c1353775-2921-4576-9b5b-8ca99d8195a7" containerID="1e351e32957b656317b4ebe834238625f4493df872df0d59481d8800d24289e5" exitCode=0 Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.896907 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c1353775-2921-4576-9b5b-8ca99d8195a7","Type":"ContainerDied","Data":"1e351e32957b656317b4ebe834238625f4493df872df0d59481d8800d24289e5"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.903537 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" podStartSLOduration=131.903524342 podStartE2EDuration="2m11.903524342s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:32:21.90214379 +0000 UTC m=+152.562433419" watchObservedRunningTime="2025-11-11 13:32:21.903524342 +0000 UTC m=+152.563813951" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.917841 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2479l" event={"ID":"55880057-105b-4a56-a0db-9e9aaff70a4c","Type":"ContainerStarted","Data":"2bc4f0a1eae90bb9dd8126d80140e109b7762c7c91fda304ff600730fbbe0317"} Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.962316 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q2br9"] Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.963508 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.966706 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.967009 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hw59z" Nov 11 13:32:21 crc kubenswrapper[4842]: I1111 13:32:21.982638 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q2br9"] Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.093677 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-catalog-content\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.093775 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-utilities\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.093800 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd8hx\" (UniqueName: \"kubernetes.io/projected/5600eccf-6390-45ff-a8e3-7f72d9327145-kube-api-access-sd8hx\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.096988 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.196166 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-catalog-content\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.196262 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-utilities\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.196308 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd8hx\" (UniqueName: \"kubernetes.io/projected/5600eccf-6390-45ff-a8e3-7f72d9327145-kube-api-access-sd8hx\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.198361 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-catalog-content\") pod 
\"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.198944 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-utilities\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.227823 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.235579 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd8hx\" (UniqueName: \"kubernetes.io/projected/5600eccf-6390-45ff-a8e3-7f72d9327145-kube-api-access-sd8hx\") pod \"redhat-operators-q2br9\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.237813 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:22 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:22 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:22 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.237860 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.296982 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.355786 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6m7j9"] Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.356828 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.371186 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6m7j9"] Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.401403 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-utilities\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.401472 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ccbs\" (UniqueName: \"kubernetes.io/projected/5710dac1-3f64-4560-a2d5-d7752f681e15-kube-api-access-9ccbs\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.401543 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-catalog-content\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.404328 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5m2t2"] Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.504755 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-catalog-content\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.504862 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-utilities\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.504909 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ccbs\" (UniqueName: \"kubernetes.io/projected/5710dac1-3f64-4560-a2d5-d7752f681e15-kube-api-access-9ccbs\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.505923 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-utilities\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.505978 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-catalog-content\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " 
pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.534247 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ccbs\" (UniqueName: \"kubernetes.io/projected/5710dac1-3f64-4560-a2d5-d7752f681e15-kube-api-access-9ccbs\") pod \"redhat-operators-6m7j9\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.607936 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q2br9"] Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.688021 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.772880 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.773658 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.776092 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.779901 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.780853 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.908794 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/038beaba-891c-479e-9084-f9d302c26845-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.908867 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/038beaba-891c-479e-9084-f9d302c26845-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.931302 4842 generic.go:334] "Generic (PLEG): container finished" podID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerID="f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a" exitCode=0 Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.931540 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2br9" event={"ID":"5600eccf-6390-45ff-a8e3-7f72d9327145","Type":"ContainerDied","Data":"f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a"} Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.931567 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2br9" event={"ID":"5600eccf-6390-45ff-a8e3-7f72d9327145","Type":"ContainerStarted","Data":"962f4e084dbd1bc11f5541402eeea74c0f496c05e06f7a288f3b9e4dd753d366"} Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.937801 4842 generic.go:334] "Generic (PLEG): container finished" 
podID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerID="6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76" exitCode=0 Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.937945 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2479l" event={"ID":"55880057-105b-4a56-a0db-9e9aaff70a4c","Type":"ContainerDied","Data":"6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76"} Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.964041 4842 generic.go:334] "Generic (PLEG): container finished" podID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerID="7c9d12af9f23906b062f257ce583abcffc9ca255d9ac983b5ba9dbafb1105b67" exitCode=0 Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.966503 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5m2t2" event={"ID":"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce","Type":"ContainerDied","Data":"7c9d12af9f23906b062f257ce583abcffc9ca255d9ac983b5ba9dbafb1105b67"} Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.966530 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5m2t2" event={"ID":"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce","Type":"ContainerStarted","Data":"46cf31606a16867cbb0a3fbdec4289de0419a7467e83fd4c338cc10a9dba2852"} Nov 11 13:32:22 crc kubenswrapper[4842]: I1111 13:32:22.977994 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6m7j9"] Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.010928 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/038beaba-891c-479e-9084-f9d302c26845-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.011067 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/038beaba-891c-479e-9084-f9d302c26845-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.012348 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/038beaba-891c-479e-9084-f9d302c26845-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.036160 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/038beaba-891c-479e-9084-f9d302c26845-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.128605 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.219703 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:23 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:23 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:23 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.220009 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.268641 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.315687 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1353775-2921-4576-9b5b-8ca99d8195a7-kube-api-access\") pod \"c1353775-2921-4576-9b5b-8ca99d8195a7\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.315763 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c1353775-2921-4576-9b5b-8ca99d8195a7-kubelet-dir\") pod \"c1353775-2921-4576-9b5b-8ca99d8195a7\" (UID: \"c1353775-2921-4576-9b5b-8ca99d8195a7\") " Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.315917 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c1353775-2921-4576-9b5b-8ca99d8195a7-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c1353775-2921-4576-9b5b-8ca99d8195a7" (UID: "c1353775-2921-4576-9b5b-8ca99d8195a7"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.316045 4842 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c1353775-2921-4576-9b5b-8ca99d8195a7-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.320645 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1353775-2921-4576-9b5b-8ca99d8195a7-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c1353775-2921-4576-9b5b-8ca99d8195a7" (UID: "c1353775-2921-4576-9b5b-8ca99d8195a7"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.417666 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1353775-2921-4576-9b5b-8ca99d8195a7-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.441050 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.972860 4842 generic.go:334] "Generic (PLEG): container finished" podID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerID="64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b" exitCode=0 Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.972899 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6m7j9" event={"ID":"5710dac1-3f64-4560-a2d5-d7752f681e15","Type":"ContainerDied","Data":"64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b"} Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.973211 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6m7j9" event={"ID":"5710dac1-3f64-4560-a2d5-d7752f681e15","Type":"ContainerStarted","Data":"f76cf67a63ac7f4fa2a6cec2666451aea763666061f550319b3b0adddd6b3a95"} Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.978654 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"038beaba-891c-479e-9084-f9d302c26845","Type":"ContainerStarted","Data":"bed0ba40038fa5533821262f0e8d6adf98073eac97a8e876e1614bd978cba13e"} Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.978727 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"038beaba-891c-479e-9084-f9d302c26845","Type":"ContainerStarted","Data":"95d9c5aa1719cccfb0b9325b2e328d1108cd20849eb326c4ede40958af7f8a6a"} Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.984477 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.986091 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c1353775-2921-4576-9b5b-8ca99d8195a7","Type":"ContainerDied","Data":"dbf7391b69653cc3c1371de7bc1c68f2c0bc753142f7bc37910b764bd2ebc6b4"} Nov 11 13:32:23 crc kubenswrapper[4842]: I1111 13:32:23.986187 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbf7391b69653cc3c1371de7bc1c68f2c0bc753142f7bc37910b764bd2ebc6b4" Nov 11 13:32:24 crc kubenswrapper[4842]: I1111 13:32:24.025157 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-svrwd" Nov 11 13:32:24 crc kubenswrapper[4842]: I1111 13:32:24.219042 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:24 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:24 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:24 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:24 crc kubenswrapper[4842]: I1111 13:32:24.219214 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:25 crc kubenswrapper[4842]: I1111 13:32:25.006847 4842 generic.go:334] "Generic (PLEG): container finished" podID="038beaba-891c-479e-9084-f9d302c26845" containerID="bed0ba40038fa5533821262f0e8d6adf98073eac97a8e876e1614bd978cba13e" exitCode=0 Nov 11 13:32:25 crc kubenswrapper[4842]: I1111 13:32:25.006910 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"038beaba-891c-479e-9084-f9d302c26845","Type":"ContainerDied","Data":"bed0ba40038fa5533821262f0e8d6adf98073eac97a8e876e1614bd978cba13e"} Nov 11 13:32:25 crc kubenswrapper[4842]: I1111 13:32:25.218922 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:25 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:25 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:25 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:25 crc kubenswrapper[4842]: I1111 13:32:25.219298 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:26 crc kubenswrapper[4842]: I1111 13:32:26.223946 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:26 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:26 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:26 crc kubenswrapper[4842]: healthz check 
failed Nov 11 13:32:26 crc kubenswrapper[4842]: I1111 13:32:26.224010 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:26 crc kubenswrapper[4842]: I1111 13:32:26.358034 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:26 crc kubenswrapper[4842]: I1111 13:32:26.363304 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-ppkl7" Nov 11 13:32:27 crc kubenswrapper[4842]: I1111 13:32:27.218142 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:27 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:27 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:27 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:27 crc kubenswrapper[4842]: I1111 13:32:27.218477 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:28 crc kubenswrapper[4842]: I1111 13:32:28.218711 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:28 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:28 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:28 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:28 crc kubenswrapper[4842]: I1111 13:32:28.218777 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:29 crc kubenswrapper[4842]: I1111 13:32:29.218591 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:29 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:29 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:29 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:29 crc kubenswrapper[4842]: I1111 13:32:29.218668 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:30 crc kubenswrapper[4842]: I1111 13:32:30.217930 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:30 crc kubenswrapper[4842]: 
[-]has-synced failed: reason withheld Nov 11 13:32:30 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:30 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:30 crc kubenswrapper[4842]: I1111 13:32:30.218228 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.044112 4842 patch_prober.go:28] interesting pod/downloads-7954f5f757-tncv8 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.044139 4842 patch_prober.go:28] interesting pod/downloads-7954f5f757-tncv8 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" start-of-body= Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.044176 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-tncv8" podUID="cf04d2ad-3dd4-418c-b9ea-9b749105b467" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.044196 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tncv8" podUID="cf04d2ad-3dd4-418c-b9ea-9b749105b467" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.8:8080/\": dial tcp 10.217.0.8:8080: connect: connection refused" Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.120609 4842 patch_prober.go:28] interesting pod/console-f9d7485db-lwbtb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.120808 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lwbtb" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.220225 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:31 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:31 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:31 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.220310 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.838680 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.846167 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6b899889-1664-4e26-9cc9-0667626ac715-metrics-certs\") pod \"network-metrics-daemon-hbtjv\" (UID: \"6b899889-1664-4e26-9cc9-0667626ac715\") " pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:32:31 crc kubenswrapper[4842]: I1111 13:32:31.908219 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-hbtjv" Nov 11 13:32:32 crc kubenswrapper[4842]: I1111 13:32:32.218951 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:32 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:32 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:32 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:32 crc kubenswrapper[4842]: I1111 13:32:32.219345 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:33 crc kubenswrapper[4842]: I1111 13:32:33.217837 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:33 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:33 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:33 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:33 crc kubenswrapper[4842]: I1111 13:32:33.217918 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:34 crc kubenswrapper[4842]: I1111 13:32:34.218549 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:34 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:34 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:34 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:34 crc kubenswrapper[4842]: I1111 13:32:34.218624 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:35 crc kubenswrapper[4842]: I1111 13:32:35.219080 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" 
start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:35 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:35 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:35 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:35 crc kubenswrapper[4842]: I1111 13:32:35.219147 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:36 crc kubenswrapper[4842]: I1111 13:32:36.218168 4842 patch_prober.go:28] interesting pod/router-default-5444994796-9x6tf container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 11 13:32:36 crc kubenswrapper[4842]: [-]has-synced failed: reason withheld Nov 11 13:32:36 crc kubenswrapper[4842]: [+]process-running ok Nov 11 13:32:36 crc kubenswrapper[4842]: healthz check failed Nov 11 13:32:36 crc kubenswrapper[4842]: I1111 13:32:36.218504 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9x6tf" podUID="12b41511-3266-433f-9580-b102a55a087b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 11 13:32:37 crc kubenswrapper[4842]: I1111 13:32:37.219174 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:37 crc kubenswrapper[4842]: I1111 13:32:37.222385 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-9x6tf" Nov 11 13:32:38 crc kubenswrapper[4842]: I1111 13:32:38.566998 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:38 crc kubenswrapper[4842]: I1111 13:32:38.622281 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/038beaba-891c-479e-9084-f9d302c26845-kubelet-dir\") pod \"038beaba-891c-479e-9084-f9d302c26845\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " Nov 11 13:32:38 crc kubenswrapper[4842]: I1111 13:32:38.622353 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/038beaba-891c-479e-9084-f9d302c26845-kube-api-access\") pod \"038beaba-891c-479e-9084-f9d302c26845\" (UID: \"038beaba-891c-479e-9084-f9d302c26845\") " Nov 11 13:32:38 crc kubenswrapper[4842]: I1111 13:32:38.622481 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/038beaba-891c-479e-9084-f9d302c26845-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "038beaba-891c-479e-9084-f9d302c26845" (UID: "038beaba-891c-479e-9084-f9d302c26845"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:32:38 crc kubenswrapper[4842]: I1111 13:32:38.627884 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/038beaba-891c-479e-9084-f9d302c26845-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "038beaba-891c-479e-9084-f9d302c26845" (UID: "038beaba-891c-479e-9084-f9d302c26845"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:32:38 crc kubenswrapper[4842]: I1111 13:32:38.722863 4842 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/038beaba-891c-479e-9084-f9d302c26845-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 11 13:32:38 crc kubenswrapper[4842]: I1111 13:32:38.723144 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/038beaba-891c-479e-9084-f9d302c26845-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 11 13:32:39 crc kubenswrapper[4842]: I1111 13:32:39.123219 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"038beaba-891c-479e-9084-f9d302c26845","Type":"ContainerDied","Data":"95d9c5aa1719cccfb0b9325b2e328d1108cd20849eb326c4ede40958af7f8a6a"} Nov 11 13:32:39 crc kubenswrapper[4842]: I1111 13:32:39.123264 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 11 13:32:39 crc kubenswrapper[4842]: I1111 13:32:39.123263 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95d9c5aa1719cccfb0b9325b2e328d1108cd20849eb326c4ede40958af7f8a6a" Nov 11 13:32:40 crc kubenswrapper[4842]: I1111 13:32:40.813660 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:32:41 crc kubenswrapper[4842]: I1111 13:32:41.074531 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-tncv8" Nov 11 13:32:41 crc kubenswrapper[4842]: I1111 13:32:41.118828 4842 patch_prober.go:28] interesting pod/console-f9d7485db-lwbtb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 11 13:32:41 crc kubenswrapper[4842]: I1111 13:32:41.118875 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lwbtb" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 11 13:32:44 crc kubenswrapper[4842]: I1111 13:32:44.961263 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:32:44 crc kubenswrapper[4842]: I1111 13:32:44.961947 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:32:51 crc kubenswrapper[4842]: I1111 13:32:51.124202 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:51 crc kubenswrapper[4842]: I1111 13:32:51.128468 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:32:51 crc 
kubenswrapper[4842]: I1111 13:32:51.603313 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p8sjn" Nov 11 13:32:57 crc kubenswrapper[4842]: E1111 13:32:57.057477 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 11 13:32:57 crc kubenswrapper[4842]: E1111 13:32:57.058310 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t6qn7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-gfkfk_openshift-marketplace(f27cec58-9f93-429d-b971-b00a25be5058): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:32:57 crc kubenswrapper[4842]: E1111 13:32:57.059707 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-gfkfk" podUID="f27cec58-9f93-429d-b971-b00a25be5058" Nov 11 13:32:58 crc kubenswrapper[4842]: I1111 13:32:58.685477 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 11 13:33:08 crc kubenswrapper[4842]: E1111 13:33:08.852642 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 11 13:33:08 crc kubenswrapper[4842]: E1111 13:33:08.853410 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sd8hx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-q2br9_openshift-marketplace(5600eccf-6390-45ff-a8e3-7f72d9327145): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:33:08 crc kubenswrapper[4842]: E1111 13:33:08.854590 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-q2br9" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" Nov 11 13:33:12 crc kubenswrapper[4842]: E1111 13:33:12.908444 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-q2br9" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.067071 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.073338 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hsz2z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-mx9qp_openshift-marketplace(6715a6fa-a4e7-4be7-a043-f71fa81d1b98): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.074686 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-mx9qp" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.104429 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.104561 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9ccbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-6m7j9_openshift-marketplace(5710dac1-3f64-4560-a2d5-d7752f681e15): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.106331 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-6m7j9" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" Nov 11 13:33:13 crc kubenswrapper[4842]: I1111 13:33:13.292269 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rznqq" event={"ID":"dff8b002-18bd-499b-92d2-f739d29131e8","Type":"ContainerStarted","Data":"b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07"} Nov 11 13:33:13 crc kubenswrapper[4842]: I1111 13:33:13.294344 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5m2t2" event={"ID":"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce","Type":"ContainerStarted","Data":"cce36b052305842cd4411f24c26cf390e2028a16dc7d4194f263fc47eca5af42"} Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.296181 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-mx9qp" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.297595 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-6m7j9" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.327820 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = 
copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.328184 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7sl86,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-254hd_openshift-marketplace(830221cf-75b5-4942-aa0b-3cb82d9e0222): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.329596 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-254hd" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" Nov 11 13:33:13 crc kubenswrapper[4842]: I1111 13:33:13.355086 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-hbtjv"] Nov 11 13:33:13 crc kubenswrapper[4842]: W1111 13:33:13.364237 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b899889_1664_4e26_9cc9_0667626ac715.slice/crio-a2a0a175e5724ac257dcb0416a0e8bb53d8577d00f415e5f655cca8d6ee048aa WatchSource:0}: Error finding container a2a0a175e5724ac257dcb0416a0e8bb53d8577d00f415e5f655cca8d6ee048aa: Status 404 returned error can't find the container with id a2a0a175e5724ac257dcb0416a0e8bb53d8577d00f415e5f655cca8d6ee048aa Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.775083 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.775224 4842 
kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b99lm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-2479l_openshift-marketplace(55880057-105b-4a56-a0db-9e9aaff70a4c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:33:13 crc kubenswrapper[4842]: E1111 13:33:13.776565 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-2479l" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.301483 4842 generic.go:334] "Generic (PLEG): container finished" podID="f27cec58-9f93-429d-b971-b00a25be5058" containerID="72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00" exitCode=0 Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.301573 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gfkfk" event={"ID":"f27cec58-9f93-429d-b971-b00a25be5058","Type":"ContainerDied","Data":"72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00"} Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.303392 4842 generic.go:334] "Generic (PLEG): container finished" podID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerID="cce36b052305842cd4411f24c26cf390e2028a16dc7d4194f263fc47eca5af42" exitCode=0 Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.303454 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5m2t2" event={"ID":"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce","Type":"ContainerDied","Data":"cce36b052305842cd4411f24c26cf390e2028a16dc7d4194f263fc47eca5af42"} Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.305833 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/network-metrics-daemon-hbtjv" event={"ID":"6b899889-1664-4e26-9cc9-0667626ac715","Type":"ContainerStarted","Data":"369838fc32f38f5f19ed37122d627e42c3f558b88765519d0c6b3226cde9d3a1"} Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.305863 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" event={"ID":"6b899889-1664-4e26-9cc9-0667626ac715","Type":"ContainerStarted","Data":"b28cb7183a2ce1ea4ee8007271b0ae42d372eea3c7e68740d6d5825fa0244f22"} Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.305875 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-hbtjv" event={"ID":"6b899889-1664-4e26-9cc9-0667626ac715","Type":"ContainerStarted","Data":"a2a0a175e5724ac257dcb0416a0e8bb53d8577d00f415e5f655cca8d6ee048aa"} Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.309220 4842 generic.go:334] "Generic (PLEG): container finished" podID="dff8b002-18bd-499b-92d2-f739d29131e8" containerID="b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07" exitCode=0 Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.309244 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rznqq" event={"ID":"dff8b002-18bd-499b-92d2-f739d29131e8","Type":"ContainerDied","Data":"b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07"} Nov 11 13:33:14 crc kubenswrapper[4842]: E1111 13:33:14.311617 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-2479l" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" Nov 11 13:33:14 crc kubenswrapper[4842]: E1111 13:33:14.312616 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-254hd" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.383826 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-hbtjv" podStartSLOduration=184.383803809 podStartE2EDuration="3m4.383803809s" podCreationTimestamp="2025-11-11 13:30:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:33:14.383143627 +0000 UTC m=+205.043433246" watchObservedRunningTime="2025-11-11 13:33:14.383803809 +0000 UTC m=+205.044093428" Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.961154 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.961502 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:33:14 
crc kubenswrapper[4842]: I1111 13:33:14.961553 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.962003 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 13:33:14 crc kubenswrapper[4842]: I1111 13:33:14.962130 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a" gracePeriod=600 Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.316588 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gfkfk" event={"ID":"f27cec58-9f93-429d-b971-b00a25be5058","Type":"ContainerStarted","Data":"dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7"} Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.320108 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5m2t2" event={"ID":"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce","Type":"ContainerStarted","Data":"b4fd87efef59f63b887c4c00203f59324e34a28f9dd18601900a1ec94295a992"} Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.322419 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a" exitCode=0 Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.322464 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a"} Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.322492 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"516a8b13b309b25528f80c0f33b222ce57df00d371d2f4ed05bbeb810d1c667d"} Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.324318 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rznqq" event={"ID":"dff8b002-18bd-499b-92d2-f739d29131e8","Type":"ContainerStarted","Data":"7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf"} Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.336794 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gfkfk" podStartSLOduration=3.3087319170000002 podStartE2EDuration="56.336769033s" podCreationTimestamp="2025-11-11 13:32:19 +0000 UTC" firstStartedPulling="2025-11-11 13:32:21.800343554 +0000 UTC m=+152.460633173" lastFinishedPulling="2025-11-11 13:33:14.82838067 +0000 UTC m=+205.488670289" observedRunningTime="2025-11-11 13:33:15.334813647 +0000 UTC m=+205.995103266" watchObservedRunningTime="2025-11-11 13:33:15.336769033 +0000 UTC 
m=+205.997058662" Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.363895 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rznqq" podStartSLOduration=3.329278619 podStartE2EDuration="57.363879341s" podCreationTimestamp="2025-11-11 13:32:18 +0000 UTC" firstStartedPulling="2025-11-11 13:32:20.736813458 +0000 UTC m=+151.397103077" lastFinishedPulling="2025-11-11 13:33:14.77141418 +0000 UTC m=+205.431703799" observedRunningTime="2025-11-11 13:33:15.361858151 +0000 UTC m=+206.022147770" watchObservedRunningTime="2025-11-11 13:33:15.363879341 +0000 UTC m=+206.024168960" Nov 11 13:33:15 crc kubenswrapper[4842]: I1111 13:33:15.378760 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5m2t2" podStartSLOduration=2.646225559 podStartE2EDuration="54.378742079s" podCreationTimestamp="2025-11-11 13:32:21 +0000 UTC" firstStartedPulling="2025-11-11 13:32:22.975441 +0000 UTC m=+153.635730619" lastFinishedPulling="2025-11-11 13:33:14.70795751 +0000 UTC m=+205.368247139" observedRunningTime="2025-11-11 13:33:15.376564615 +0000 UTC m=+206.036854234" watchObservedRunningTime="2025-11-11 13:33:15.378742079 +0000 UTC m=+206.039031698" Nov 11 13:33:19 crc kubenswrapper[4842]: I1111 13:33:19.387691 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:33:19 crc kubenswrapper[4842]: I1111 13:33:19.388344 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:33:19 crc kubenswrapper[4842]: I1111 13:33:19.529335 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:33:19 crc kubenswrapper[4842]: I1111 13:33:19.635233 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:33:19 crc kubenswrapper[4842]: I1111 13:33:19.635291 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:33:19 crc kubenswrapper[4842]: I1111 13:33:19.679717 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:33:20 crc kubenswrapper[4842]: I1111 13:33:20.386375 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:33:20 crc kubenswrapper[4842]: I1111 13:33:20.389400 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:33:21 crc kubenswrapper[4842]: I1111 13:33:21.525440 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gfkfk"] Nov 11 13:33:21 crc kubenswrapper[4842]: I1111 13:33:21.674120 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:33:21 crc kubenswrapper[4842]: I1111 13:33:21.674184 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:33:21 crc kubenswrapper[4842]: I1111 13:33:21.715602 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:33:22 
crc kubenswrapper[4842]: I1111 13:33:22.519644 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gfkfk" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="registry-server" containerID="cri-o://dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7" gracePeriod=2 Nov 11 13:33:22 crc kubenswrapper[4842]: I1111 13:33:22.556719 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:33:22 crc kubenswrapper[4842]: I1111 13:33:22.941937 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.129001 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-catalog-content\") pod \"f27cec58-9f93-429d-b971-b00a25be5058\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.129047 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-utilities\") pod \"f27cec58-9f93-429d-b971-b00a25be5058\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.129094 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6qn7\" (UniqueName: \"kubernetes.io/projected/f27cec58-9f93-429d-b971-b00a25be5058-kube-api-access-t6qn7\") pod \"f27cec58-9f93-429d-b971-b00a25be5058\" (UID: \"f27cec58-9f93-429d-b971-b00a25be5058\") " Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.130298 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-utilities" (OuterVolumeSpecName: "utilities") pod "f27cec58-9f93-429d-b971-b00a25be5058" (UID: "f27cec58-9f93-429d-b971-b00a25be5058"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.138309 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f27cec58-9f93-429d-b971-b00a25be5058-kube-api-access-t6qn7" (OuterVolumeSpecName: "kube-api-access-t6qn7") pod "f27cec58-9f93-429d-b971-b00a25be5058" (UID: "f27cec58-9f93-429d-b971-b00a25be5058"). InnerVolumeSpecName "kube-api-access-t6qn7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.183150 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f27cec58-9f93-429d-b971-b00a25be5058" (UID: "f27cec58-9f93-429d-b971-b00a25be5058"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.231136 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.231173 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f27cec58-9f93-429d-b971-b00a25be5058-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.231186 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6qn7\" (UniqueName: \"kubernetes.io/projected/f27cec58-9f93-429d-b971-b00a25be5058-kube-api-access-t6qn7\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.526479 4842 generic.go:334] "Generic (PLEG): container finished" podID="f27cec58-9f93-429d-b971-b00a25be5058" containerID="dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7" exitCode=0 Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.526549 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gfkfk" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.526563 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gfkfk" event={"ID":"f27cec58-9f93-429d-b971-b00a25be5058","Type":"ContainerDied","Data":"dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7"} Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.526987 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gfkfk" event={"ID":"f27cec58-9f93-429d-b971-b00a25be5058","Type":"ContainerDied","Data":"7f47ddf83476d7dbf55874ee9c4af1a3c444698c47ee9960b684fa48a13b84d6"} Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.527027 4842 scope.go:117] "RemoveContainer" containerID="dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.551738 4842 scope.go:117] "RemoveContainer" containerID="72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.555614 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gfkfk"] Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.560314 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gfkfk"] Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.581506 4842 scope.go:117] "RemoveContainer" containerID="ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.619057 4842 scope.go:117] "RemoveContainer" containerID="dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7" Nov 11 13:33:23 crc kubenswrapper[4842]: E1111 13:33:23.619823 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7\": container with ID starting with dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7 not found: ID does not exist" containerID="dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.619861 
4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7"} err="failed to get container status \"dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7\": rpc error: code = NotFound desc = could not find container \"dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7\": container with ID starting with dcc607ff23a06db849e352b155b251dd179e56e468c621eff64b82c7f58d6ae7 not found: ID does not exist" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.619886 4842 scope.go:117] "RemoveContainer" containerID="72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00" Nov 11 13:33:23 crc kubenswrapper[4842]: E1111 13:33:23.620312 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00\": container with ID starting with 72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00 not found: ID does not exist" containerID="72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.620338 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00"} err="failed to get container status \"72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00\": rpc error: code = NotFound desc = could not find container \"72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00\": container with ID starting with 72ec8cc06db30fccf5f262c5647a8d97455dfe5b2f819c1ee75a444c63088c00 not found: ID does not exist" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.620365 4842 scope.go:117] "RemoveContainer" containerID="ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3" Nov 11 13:33:23 crc kubenswrapper[4842]: E1111 13:33:23.620669 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3\": container with ID starting with ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3 not found: ID does not exist" containerID="ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.620704 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3"} err="failed to get container status \"ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3\": rpc error: code = NotFound desc = could not find container \"ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3\": container with ID starting with ba3bd7c7e29858053e3ee6d6ce31f2558c499c84a4be3042b8c48062b382e6a3 not found: ID does not exist" Nov 11 13:33:23 crc kubenswrapper[4842]: I1111 13:33:23.924426 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5m2t2"] Nov 11 13:33:24 crc kubenswrapper[4842]: I1111 13:33:24.066658 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f27cec58-9f93-429d-b971-b00a25be5058" path="/var/lib/kubelet/pods/f27cec58-9f93-429d-b971-b00a25be5058/volumes" Nov 11 13:33:24 crc kubenswrapper[4842]: I1111 13:33:24.533053 4842 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-marketplace/redhat-marketplace-5m2t2" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="registry-server" containerID="cri-o://b4fd87efef59f63b887c4c00203f59324e34a28f9dd18601900a1ec94295a992" gracePeriod=2 Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.549786 4842 generic.go:334] "Generic (PLEG): container finished" podID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerID="b4fd87efef59f63b887c4c00203f59324e34a28f9dd18601900a1ec94295a992" exitCode=0 Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.549827 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5m2t2" event={"ID":"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce","Type":"ContainerDied","Data":"b4fd87efef59f63b887c4c00203f59324e34a28f9dd18601900a1ec94295a992"} Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.704306 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.864832 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bnkm\" (UniqueName: \"kubernetes.io/projected/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-kube-api-access-2bnkm\") pod \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.864913 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-catalog-content\") pod \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.864937 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-utilities\") pod \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\" (UID: \"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce\") " Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.865793 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-utilities" (OuterVolumeSpecName: "utilities") pod "79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" (UID: "79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.869691 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-kube-api-access-2bnkm" (OuterVolumeSpecName: "kube-api-access-2bnkm") pod "79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" (UID: "79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce"). InnerVolumeSpecName "kube-api-access-2bnkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.884459 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" (UID: "79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.966487 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bnkm\" (UniqueName: \"kubernetes.io/projected/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-kube-api-access-2bnkm\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.966524 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:25 crc kubenswrapper[4842]: I1111 13:33:25.966535 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.556759 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2br9" event={"ID":"5600eccf-6390-45ff-a8e3-7f72d9327145","Type":"ContainerStarted","Data":"88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8"} Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.559214 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5m2t2" event={"ID":"79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce","Type":"ContainerDied","Data":"46cf31606a16867cbb0a3fbdec4289de0419a7467e83fd4c338cc10a9dba2852"} Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.559257 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5m2t2" Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.559255 4842 scope.go:117] "RemoveContainer" containerID="b4fd87efef59f63b887c4c00203f59324e34a28f9dd18601900a1ec94295a992" Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.561985 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6m7j9" event={"ID":"5710dac1-3f64-4560-a2d5-d7752f681e15","Type":"ContainerStarted","Data":"4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888"} Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.581287 4842 scope.go:117] "RemoveContainer" containerID="cce36b052305842cd4411f24c26cf390e2028a16dc7d4194f263fc47eca5af42" Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.587066 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5m2t2"] Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.591382 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5m2t2"] Nov 11 13:33:26 crc kubenswrapper[4842]: I1111 13:33:26.601602 4842 scope.go:117] "RemoveContainer" containerID="7c9d12af9f23906b062f257ce583abcffc9ca255d9ac983b5ba9dbafb1105b67" Nov 11 13:33:27 crc kubenswrapper[4842]: I1111 13:33:27.569546 4842 generic.go:334] "Generic (PLEG): container finished" podID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerID="88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8" exitCode=0 Nov 11 13:33:27 crc kubenswrapper[4842]: I1111 13:33:27.569634 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2br9" event={"ID":"5600eccf-6390-45ff-a8e3-7f72d9327145","Type":"ContainerDied","Data":"88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8"} Nov 11 13:33:27 crc kubenswrapper[4842]: I1111 13:33:27.575767 4842 
generic.go:334] "Generic (PLEG): container finished" podID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerID="4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888" exitCode=0 Nov 11 13:33:27 crc kubenswrapper[4842]: I1111 13:33:27.575856 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6m7j9" event={"ID":"5710dac1-3f64-4560-a2d5-d7752f681e15","Type":"ContainerDied","Data":"4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888"} Nov 11 13:33:27 crc kubenswrapper[4842]: I1111 13:33:27.577740 4842 generic.go:334] "Generic (PLEG): container finished" podID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerID="42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591" exitCode=0 Nov 11 13:33:27 crc kubenswrapper[4842]: I1111 13:33:27.577780 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mx9qp" event={"ID":"6715a6fa-a4e7-4be7-a043-f71fa81d1b98","Type":"ContainerDied","Data":"42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591"} Nov 11 13:33:28 crc kubenswrapper[4842]: I1111 13:33:28.079211 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" path="/var/lib/kubelet/pods/79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce/volumes" Nov 11 13:33:28 crc kubenswrapper[4842]: I1111 13:33:28.584172 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6m7j9" event={"ID":"5710dac1-3f64-4560-a2d5-d7752f681e15","Type":"ContainerStarted","Data":"c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1"} Nov 11 13:33:28 crc kubenswrapper[4842]: I1111 13:33:28.585679 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mx9qp" event={"ID":"6715a6fa-a4e7-4be7-a043-f71fa81d1b98","Type":"ContainerStarted","Data":"0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca"} Nov 11 13:33:28 crc kubenswrapper[4842]: I1111 13:33:28.587022 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2br9" event={"ID":"5600eccf-6390-45ff-a8e3-7f72d9327145","Type":"ContainerStarted","Data":"3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0"} Nov 11 13:33:28 crc kubenswrapper[4842]: I1111 13:33:28.603036 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6m7j9" podStartSLOduration=2.584618687 podStartE2EDuration="1m6.603022529s" podCreationTimestamp="2025-11-11 13:32:22 +0000 UTC" firstStartedPulling="2025-11-11 13:32:23.980639912 +0000 UTC m=+154.640929531" lastFinishedPulling="2025-11-11 13:33:27.999043764 +0000 UTC m=+218.659333373" observedRunningTime="2025-11-11 13:33:28.599419886 +0000 UTC m=+219.259709505" watchObservedRunningTime="2025-11-11 13:33:28.603022529 +0000 UTC m=+219.263312148" Nov 11 13:33:28 crc kubenswrapper[4842]: I1111 13:33:28.624806 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q2br9" podStartSLOduration=2.427713853 podStartE2EDuration="1m7.624791224s" podCreationTimestamp="2025-11-11 13:32:21 +0000 UTC" firstStartedPulling="2025-11-11 13:32:22.934084184 +0000 UTC m=+153.594373803" lastFinishedPulling="2025-11-11 13:33:28.131161555 +0000 UTC m=+218.791451174" observedRunningTime="2025-11-11 13:33:28.623395435 +0000 UTC m=+219.283685054" watchObservedRunningTime="2025-11-11 13:33:28.624791224 +0000 UTC 
m=+219.285080843"
Nov 11 13:33:28 crc kubenswrapper[4842]: I1111 13:33:28.640994 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mx9qp" podStartSLOduration=3.340836671 podStartE2EDuration="1m9.640978097s" podCreationTimestamp="2025-11-11 13:32:19 +0000 UTC" firstStartedPulling="2025-11-11 13:32:21.837333335 +0000 UTC m=+152.497622954" lastFinishedPulling="2025-11-11 13:33:28.137474761 +0000 UTC m=+218.797764380" observedRunningTime="2025-11-11 13:33:28.639034001 +0000 UTC m=+219.299323610" watchObservedRunningTime="2025-11-11 13:33:28.640978097 +0000 UTC m=+219.301267716"
Nov 11 13:33:29 crc kubenswrapper[4842]: I1111 13:33:29.595143 4842 generic.go:334] "Generic (PLEG): container finished" podID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerID="2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07" exitCode=0
Nov 11 13:33:29 crc kubenswrapper[4842]: I1111 13:33:29.595182 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254hd" event={"ID":"830221cf-75b5-4942-aa0b-3cb82d9e0222","Type":"ContainerDied","Data":"2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07"}
Nov 11 13:33:29 crc kubenswrapper[4842]: I1111 13:33:29.749324 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mx9qp"
Nov 11 13:33:29 crc kubenswrapper[4842]: I1111 13:33:29.749870 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mx9qp"
Nov 11 13:33:30 crc kubenswrapper[4842]: I1111 13:33:30.603038 4842 generic.go:334] "Generic (PLEG): container finished" podID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerID="604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343" exitCode=0
Nov 11 13:33:30 crc kubenswrapper[4842]: I1111 13:33:30.603112 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2479l" event={"ID":"55880057-105b-4a56-a0db-9e9aaff70a4c","Type":"ContainerDied","Data":"604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343"}
Nov 11 13:33:30 crc kubenswrapper[4842]: I1111 13:33:30.605393 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254hd" event={"ID":"830221cf-75b5-4942-aa0b-3cb82d9e0222","Type":"ContainerStarted","Data":"dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6"}
Nov 11 13:33:30 crc kubenswrapper[4842]: I1111 13:33:30.641670 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-254hd" podStartSLOduration=3.267715284 podStartE2EDuration="1m12.641652767s" podCreationTimestamp="2025-11-11 13:32:18 +0000 UTC" firstStartedPulling="2025-11-11 13:32:20.707208208 +0000 UTC m=+151.367497827" lastFinishedPulling="2025-11-11 13:33:30.081145691 +0000 UTC m=+220.741435310" observedRunningTime="2025-11-11 13:33:30.640713616 +0000 UTC m=+221.301003235" watchObservedRunningTime="2025-11-11 13:33:30.641652767 +0000 UTC m=+221.301942386"
Nov 11 13:33:30 crc kubenswrapper[4842]: I1111 13:33:30.793665 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-mx9qp" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="registry-server" probeResult="failure" output=<
Nov 11 13:33:30 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s
Nov 11 13:33:30 crc kubenswrapper[4842]: >
Nov 11 13:33:31 crc kubenswrapper[4842]: I1111 13:33:31.613797 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2479l" event={"ID":"55880057-105b-4a56-a0db-9e9aaff70a4c","Type":"ContainerStarted","Data":"5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d"}
Nov 11 13:33:31 crc kubenswrapper[4842]: I1111 13:33:31.632374 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2479l" podStartSLOduration=3.487672602 podStartE2EDuration="1m11.632353693s" podCreationTimestamp="2025-11-11 13:32:20 +0000 UTC" firstStartedPulling="2025-11-11 13:32:22.947643145 +0000 UTC m=+153.607932764" lastFinishedPulling="2025-11-11 13:33:31.092324236 +0000 UTC m=+221.752613855" observedRunningTime="2025-11-11 13:33:31.630045304 +0000 UTC m=+222.290334953" watchObservedRunningTime="2025-11-11 13:33:31.632353693 +0000 UTC m=+222.292643312"
Nov 11 13:33:32 crc kubenswrapper[4842]: I1111 13:33:32.297619 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q2br9"
Nov 11 13:33:32 crc kubenswrapper[4842]: I1111 13:33:32.297987 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q2br9"
Nov 11 13:33:32 crc kubenswrapper[4842]: I1111 13:33:32.689111 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6m7j9"
Nov 11 13:33:32 crc kubenswrapper[4842]: I1111 13:33:32.689170 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6m7j9"
Nov 11 13:33:33 crc kubenswrapper[4842]: I1111 13:33:33.337149 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q2br9" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="registry-server" probeResult="failure" output=<
Nov 11 13:33:33 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s
Nov 11 13:33:33 crc kubenswrapper[4842]: >
Nov 11 13:33:33 crc kubenswrapper[4842]: I1111 13:33:33.728267 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6m7j9" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="registry-server" probeResult="failure" output=<
Nov 11 13:33:33 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s
Nov 11 13:33:33 crc kubenswrapper[4842]: >
Nov 11 13:33:39 crc kubenswrapper[4842]: I1111 13:33:39.217783 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-254hd"
Nov 11 13:33:39 crc kubenswrapper[4842]: I1111 13:33:39.218219 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-254hd"
Nov 11 13:33:39 crc kubenswrapper[4842]: I1111 13:33:39.268625 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-254hd"
Nov 11 13:33:39 crc kubenswrapper[4842]: I1111 13:33:39.702415 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-254hd"
Nov 11 13:33:39 crc kubenswrapper[4842]: I1111 13:33:39.792786 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mx9qp"
Nov 11 13:33:39 crc kubenswrapper[4842]: 
I1111 13:33:39.841443 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:33:40 crc kubenswrapper[4842]: I1111 13:33:40.326580 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mx9qp"] Nov 11 13:33:41 crc kubenswrapper[4842]: I1111 13:33:41.318326 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:33:41 crc kubenswrapper[4842]: I1111 13:33:41.318675 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:33:41 crc kubenswrapper[4842]: I1111 13:33:41.359053 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:33:41 crc kubenswrapper[4842]: I1111 13:33:41.662017 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mx9qp" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="registry-server" containerID="cri-o://0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca" gracePeriod=2 Nov 11 13:33:41 crc kubenswrapper[4842]: I1111 13:33:41.703582 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.004966 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.171693 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-catalog-content\") pod \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.171743 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hsz2z\" (UniqueName: \"kubernetes.io/projected/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-kube-api-access-hsz2z\") pod \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.171784 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-utilities\") pod \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\" (UID: \"6715a6fa-a4e7-4be7-a043-f71fa81d1b98\") " Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.172577 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-utilities" (OuterVolumeSpecName: "utilities") pod "6715a6fa-a4e7-4be7-a043-f71fa81d1b98" (UID: "6715a6fa-a4e7-4be7-a043-f71fa81d1b98"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.176347 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-kube-api-access-hsz2z" (OuterVolumeSpecName: "kube-api-access-hsz2z") pod "6715a6fa-a4e7-4be7-a043-f71fa81d1b98" (UID: "6715a6fa-a4e7-4be7-a043-f71fa81d1b98"). InnerVolumeSpecName "kube-api-access-hsz2z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.214947 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6715a6fa-a4e7-4be7-a043-f71fa81d1b98" (UID: "6715a6fa-a4e7-4be7-a043-f71fa81d1b98"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.272890 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.272926 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.272937 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hsz2z\" (UniqueName: \"kubernetes.io/projected/6715a6fa-a4e7-4be7-a043-f71fa81d1b98-kube-api-access-hsz2z\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.337981 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.382799 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.669304 4842 generic.go:334] "Generic (PLEG): container finished" podID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerID="0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca" exitCode=0 Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.669385 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mx9qp" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.669439 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mx9qp" event={"ID":"6715a6fa-a4e7-4be7-a043-f71fa81d1b98","Type":"ContainerDied","Data":"0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca"} Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.669496 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mx9qp" event={"ID":"6715a6fa-a4e7-4be7-a043-f71fa81d1b98","Type":"ContainerDied","Data":"080a78ac2a6e963c05d8d6a66cfdf94a3425327ff804244380ce639f0b4105fc"} Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.669515 4842 scope.go:117] "RemoveContainer" containerID="0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.684409 4842 scope.go:117] "RemoveContainer" containerID="42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.701605 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mx9qp"] Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.704730 4842 scope.go:117] "RemoveContainer" containerID="f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.706859 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mx9qp"] Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.728091 4842 scope.go:117] "RemoveContainer" containerID="0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca" Nov 11 13:33:42 crc kubenswrapper[4842]: E1111 13:33:42.728820 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca\": container with ID starting with 0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca not found: ID does not exist" containerID="0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.728883 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca"} err="failed to get container status \"0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca\": rpc error: code = NotFound desc = could not find container \"0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca\": container with ID starting with 0cfd8604ff1123850cddef53f3fece67214f748da425729db16671ed60008aca not found: ID does not exist" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.728927 4842 scope.go:117] "RemoveContainer" containerID="42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591" Nov 11 13:33:42 crc kubenswrapper[4842]: E1111 13:33:42.729346 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591\": container with ID starting with 42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591 not found: ID does not exist" containerID="42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.729400 4842 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591"} err="failed to get container status \"42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591\": rpc error: code = NotFound desc = could not find container \"42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591\": container with ID starting with 42df1baa4bdb5c2dc2c813349d835fe486407caf7e73f1d77fd20f8289c53591 not found: ID does not exist" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.729443 4842 scope.go:117] "RemoveContainer" containerID="f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62" Nov 11 13:33:42 crc kubenswrapper[4842]: E1111 13:33:42.729846 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62\": container with ID starting with f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62 not found: ID does not exist" containerID="f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.729886 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62"} err="failed to get container status \"f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62\": rpc error: code = NotFound desc = could not find container \"f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62\": container with ID starting with f7e359c740bea94d1facf751e736d5a65492ea77375149b04770c6ae01adde62 not found: ID does not exist" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.736727 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:33:42 crc kubenswrapper[4842]: I1111 13:33:42.781671 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:33:44 crc kubenswrapper[4842]: I1111 13:33:44.067863 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" path="/var/lib/kubelet/pods/6715a6fa-a4e7-4be7-a043-f71fa81d1b98/volumes" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.123532 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6m7j9"] Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.123762 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6m7j9" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="registry-server" containerID="cri-o://c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1" gracePeriod=2 Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.561813 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.715169 4842 generic.go:334] "Generic (PLEG): container finished" podID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerID="c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1" exitCode=0 Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.715223 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6m7j9" event={"ID":"5710dac1-3f64-4560-a2d5-d7752f681e15","Type":"ContainerDied","Data":"c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1"} Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.715255 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6m7j9" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.715287 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6m7j9" event={"ID":"5710dac1-3f64-4560-a2d5-d7752f681e15","Type":"ContainerDied","Data":"f76cf67a63ac7f4fa2a6cec2666451aea763666061f550319b3b0adddd6b3a95"} Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.715309 4842 scope.go:117] "RemoveContainer" containerID="c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.733541 4842 scope.go:117] "RemoveContainer" containerID="4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.734033 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-utilities\") pod \"5710dac1-3f64-4560-a2d5-d7752f681e15\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.734078 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ccbs\" (UniqueName: \"kubernetes.io/projected/5710dac1-3f64-4560-a2d5-d7752f681e15-kube-api-access-9ccbs\") pod \"5710dac1-3f64-4560-a2d5-d7752f681e15\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.734105 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-catalog-content\") pod \"5710dac1-3f64-4560-a2d5-d7752f681e15\" (UID: \"5710dac1-3f64-4560-a2d5-d7752f681e15\") " Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.734790 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-utilities" (OuterVolumeSpecName: "utilities") pod "5710dac1-3f64-4560-a2d5-d7752f681e15" (UID: "5710dac1-3f64-4560-a2d5-d7752f681e15"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.743408 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5710dac1-3f64-4560-a2d5-d7752f681e15-kube-api-access-9ccbs" (OuterVolumeSpecName: "kube-api-access-9ccbs") pod "5710dac1-3f64-4560-a2d5-d7752f681e15" (UID: "5710dac1-3f64-4560-a2d5-d7752f681e15"). InnerVolumeSpecName "kube-api-access-9ccbs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.749307 4842 scope.go:117] "RemoveContainer" containerID="64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.780529 4842 scope.go:117] "RemoveContainer" containerID="c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1" Nov 11 13:33:46 crc kubenswrapper[4842]: E1111 13:33:46.781250 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1\": container with ID starting with c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1 not found: ID does not exist" containerID="c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.781290 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1"} err="failed to get container status \"c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1\": rpc error: code = NotFound desc = could not find container \"c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1\": container with ID starting with c8b55742e41050e79bfda616439629df13cc5bfa46c4799ed7382acbf8832dc1 not found: ID does not exist" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.781322 4842 scope.go:117] "RemoveContainer" containerID="4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888" Nov 11 13:33:46 crc kubenswrapper[4842]: E1111 13:33:46.781707 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888\": container with ID starting with 4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888 not found: ID does not exist" containerID="4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.781732 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888"} err="failed to get container status \"4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888\": rpc error: code = NotFound desc = could not find container \"4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888\": container with ID starting with 4aa372a81943fd78ce805167e34faeb83affb6d27eea6673780012cc26705888 not found: ID does not exist" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.781750 4842 scope.go:117] "RemoveContainer" containerID="64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b" Nov 11 13:33:46 crc kubenswrapper[4842]: E1111 13:33:46.781956 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b\": container with ID starting with 64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b not found: ID does not exist" containerID="64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.782046 4842 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b"} err="failed to get container status \"64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b\": rpc error: code = NotFound desc = could not find container \"64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b\": container with ID starting with 64809c8d8d96a8a135b2708fb2225186d7f6743502ce1789aa53a63d9feb8d9b not found: ID does not exist" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.823848 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5710dac1-3f64-4560-a2d5-d7752f681e15" (UID: "5710dac1-3f64-4560-a2d5-d7752f681e15"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.835872 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.835901 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ccbs\" (UniqueName: \"kubernetes.io/projected/5710dac1-3f64-4560-a2d5-d7752f681e15-kube-api-access-9ccbs\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:46 crc kubenswrapper[4842]: I1111 13:33:46.835913 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5710dac1-3f64-4560-a2d5-d7752f681e15-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:33:47 crc kubenswrapper[4842]: I1111 13:33:47.050691 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6m7j9"] Nov 11 13:33:47 crc kubenswrapper[4842]: I1111 13:33:47.055440 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6m7j9"] Nov 11 13:33:48 crc kubenswrapper[4842]: I1111 13:33:48.066915 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" path="/var/lib/kubelet/pods/5710dac1-3f64-4560-a2d5-d7752f681e15/volumes" Nov 11 13:33:50 crc kubenswrapper[4842]: I1111 13:33:50.537053 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gc82n"] Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.569271 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" podUID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" containerName="oauth-openshift" containerID="cri-o://b79ccc8cb475cb260ac4a4b995989ee6e96b70f4ae3e0719d0c991efe158d7fa" gracePeriod=15 Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.855178 4842 generic.go:334] "Generic (PLEG): container finished" podID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" containerID="b79ccc8cb475cb260ac4a4b995989ee6e96b70f4ae3e0719d0c991efe158d7fa" exitCode=0 Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.855271 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" event={"ID":"a25deeed-3854-4f02-aa77-b7e616f2f2b8","Type":"ContainerDied","Data":"b79ccc8cb475cb260ac4a4b995989ee6e96b70f4ae3e0719d0c991efe158d7fa"} Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.922536 4842 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950398 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6644f974c8-9rrpn"] Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950640 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950653 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950663 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950671 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950684 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950691 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950698 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950706 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950717 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" containerName="oauth-openshift" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950724 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" containerName="oauth-openshift" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950732 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950739 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950749 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="038beaba-891c-479e-9084-f9d302c26845" containerName="pruner" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950757 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="038beaba-891c-479e-9084-f9d302c26845" containerName="pruner" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950765 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950772 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950782 4842 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950789 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950798 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950805 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950815 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1353775-2921-4576-9b5b-8ca99d8195a7" containerName="pruner" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950822 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1353775-2921-4576-9b5b-8ca99d8195a7" containerName="pruner" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950831 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950838 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="extract-content" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950848 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950855 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950863 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950870 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="extract-utilities" Nov 11 13:34:15 crc kubenswrapper[4842]: E1111 13:34:15.950880 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.950888 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951025 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b40098-5bf0-4b6e-90b2-8d3c2e47d0ce" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951035 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" containerName="oauth-openshift" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951049 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="5710dac1-3f64-4560-a2d5-d7752f681e15" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951059 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="038beaba-891c-479e-9084-f9d302c26845" containerName="pruner" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951072 4842 
memory_manager.go:354] "RemoveStaleState removing state" podUID="6715a6fa-a4e7-4be7-a043-f71fa81d1b98" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951083 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1353775-2921-4576-9b5b-8ca99d8195a7" containerName="pruner" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951092 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="f27cec58-9f93-429d-b971-b00a25be5058" containerName="registry-server" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.951543 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:15 crc kubenswrapper[4842]: I1111 13:34:15.969994 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6644f974c8-9rrpn"] Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115390 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-error\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115469 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-ocp-branding-template\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115496 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-service-ca\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115516 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-serving-cert\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115545 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-trusted-ca-bundle\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115566 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-cliconfig\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115587 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-dir\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: 
\"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115609 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-router-certs\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115633 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-idp-0-file-data\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115657 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-policies\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115693 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-login\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115714 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-provider-selection\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115738 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpt8z\" (UniqueName: \"kubernetes.io/projected/a25deeed-3854-4f02-aa77-b7e616f2f2b8-kube-api-access-dpt8z\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115764 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-session\") pod \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\" (UID: \"a25deeed-3854-4f02-aa77-b7e616f2f2b8\") " Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115855 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115886 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: 
\"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115917 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115946 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-session\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.115997 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-error\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116025 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-service-ca\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116053 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/763d1338-b72d-4229-a706-47879c606e88-audit-dir\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116075 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsdnn\" (UniqueName: \"kubernetes.io/projected/763d1338-b72d-4229-a706-47879c606e88-kube-api-access-zsdnn\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116141 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-login\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116171 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-router-certs\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116196 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116224 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-audit-policies\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116274 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116366 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.116735 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.117095 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.117146 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.117882 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.122241 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.122734 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.123006 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.123337 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a25deeed-3854-4f02-aa77-b7e616f2f2b8-kube-api-access-dpt8z" (OuterVolumeSpecName: "kube-api-access-dpt8z") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "kube-api-access-dpt8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.123497 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.123767 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.123865 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.123867 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.124820 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a25deeed-3854-4f02-aa77-b7e616f2f2b8" (UID: "a25deeed-3854-4f02-aa77-b7e616f2f2b8"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.216926 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-login\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.216974 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-router-certs\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.216998 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217034 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-audit-policies\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217053 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217078 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217149 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217176 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " 
pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217200 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217223 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-session\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217256 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-error\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217283 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-service-ca\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217310 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/763d1338-b72d-4229-a706-47879c606e88-audit-dir\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217331 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsdnn\" (UniqueName: \"kubernetes.io/projected/763d1338-b72d-4229-a706-47879c606e88-kube-api-access-zsdnn\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217380 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217396 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217409 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217424 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217437 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217449 4842 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217461 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217473 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217484 4842 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a25deeed-3854-4f02-aa77-b7e616f2f2b8-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217672 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217685 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217698 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpt8z\" (UniqueName: \"kubernetes.io/projected/a25deeed-3854-4f02-aa77-b7e616f2f2b8-kube-api-access-dpt8z\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217710 4842 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217716 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/763d1338-b72d-4229-a706-47879c606e88-audit-dir\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.217722 4842 reconciler_common.go:293] "Volume 
detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a25deeed-3854-4f02-aa77-b7e616f2f2b8-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.219504 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-service-ca\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.220031 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.220757 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-audit-policies\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.220901 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.228594 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.229575 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-router-certs\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.230016 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.230067 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-login\") pod 
\"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.230313 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-session\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.232416 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-template-error\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.232995 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.233336 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/763d1338-b72d-4229-a706-47879c606e88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.234105 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsdnn\" (UniqueName: \"kubernetes.io/projected/763d1338-b72d-4229-a706-47879c606e88-kube-api-access-zsdnn\") pod \"oauth-openshift-6644f974c8-9rrpn\" (UID: \"763d1338-b72d-4229-a706-47879c606e88\") " pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.274377 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.650813 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6644f974c8-9rrpn"] Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.864050 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" event={"ID":"a25deeed-3854-4f02-aa77-b7e616f2f2b8","Type":"ContainerDied","Data":"4df0f5d72c297fc1c2b3d8f284ae30311fc4821656ce1602d58077fae2a07d0b"} Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.864126 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gc82n" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.864146 4842 scope.go:117] "RemoveContainer" containerID="b79ccc8cb475cb260ac4a4b995989ee6e96b70f4ae3e0719d0c991efe158d7fa" Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.864942 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" event={"ID":"763d1338-b72d-4229-a706-47879c606e88","Type":"ContainerStarted","Data":"d5f5e12abc6aeddecff1288203ecba45ccdef9022f2ef50e244bd3bb40b00344"} Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.895242 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gc82n"] Nov 11 13:34:16 crc kubenswrapper[4842]: I1111 13:34:16.897674 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gc82n"] Nov 11 13:34:17 crc kubenswrapper[4842]: I1111 13:34:17.869795 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" event={"ID":"763d1338-b72d-4229-a706-47879c606e88","Type":"ContainerStarted","Data":"7262b100deda384c4b98ba4b4cd857b03985667b0d92fe4b1e4d17eff8b6e93c"} Nov 11 13:34:17 crc kubenswrapper[4842]: I1111 13:34:17.869969 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:17 crc kubenswrapper[4842]: I1111 13:34:17.875421 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" Nov 11 13:34:17 crc kubenswrapper[4842]: I1111 13:34:17.886854 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6644f974c8-9rrpn" podStartSLOduration=27.886838576 podStartE2EDuration="27.886838576s" podCreationTimestamp="2025-11-11 13:33:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:34:17.884750055 +0000 UTC m=+268.545039674" watchObservedRunningTime="2025-11-11 13:34:17.886838576 +0000 UTC m=+268.547128195" Nov 11 13:34:18 crc kubenswrapper[4842]: I1111 13:34:18.064287 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a25deeed-3854-4f02-aa77-b7e616f2f2b8" path="/var/lib/kubelet/pods/a25deeed-3854-4f02-aa77-b7e616f2f2b8/volumes" Nov 11 13:35:44 crc kubenswrapper[4842]: I1111 13:35:44.960924 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:35:44 crc kubenswrapper[4842]: I1111 13:35:44.961601 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.493850 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-254hd"] Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.496503 4842 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/certified-operators-254hd" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="registry-server" containerID="cri-o://dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6" gracePeriod=30 Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.502208 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rznqq"] Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.502428 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rznqq" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="registry-server" containerID="cri-o://7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf" gracePeriod=30 Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.514515 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wgl8"] Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.514719 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" containerID="cri-o://39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed" gracePeriod=30 Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.527015 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2479l"] Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.527539 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2479l" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="registry-server" containerID="cri-o://5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d" gracePeriod=30 Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.543592 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q2br9"] Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.543818 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q2br9" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="registry-server" containerID="cri-o://3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0" gracePeriod=30 Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.546903 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-95g48"] Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.547693 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.577356 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-95g48"] Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.691682 4842 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2wgl8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.692010 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.746072 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.746145 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.746195 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5t6z\" (UniqueName: \"kubernetes.io/projected/ce99883b-0503-48f7-9711-c7caaa523a00-kube-api-access-q5t6z\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.847735 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5t6z\" (UniqueName: \"kubernetes.io/projected/ce99883b-0503-48f7-9711-c7caaa523a00-kube-api-access-q5t6z\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.847797 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.847845 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-operator-metrics\") pod 
\"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.849466 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.860187 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.867828 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5t6z\" (UniqueName: \"kubernetes.io/projected/ce99883b-0503-48f7-9711-c7caaa523a00-kube-api-access-q5t6z\") pod \"marketplace-operator-79b997595-95g48\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.957497 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.970080 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:36:01 crc kubenswrapper[4842]: I1111 13:36:01.970824 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.013566 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.015576 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.026340 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.152597 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttskp\" (UniqueName: \"kubernetes.io/projected/dff8b002-18bd-499b-92d2-f739d29131e8-kube-api-access-ttskp\") pod \"dff8b002-18bd-499b-92d2-f739d29131e8\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153053 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-operator-metrics\") pod \"37480fb1-03b9-4913-a336-c18363b1e85e\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153129 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-utilities\") pod \"dff8b002-18bd-499b-92d2-f739d29131e8\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153159 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-utilities\") pod \"830221cf-75b5-4942-aa0b-3cb82d9e0222\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153234 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd8hx\" (UniqueName: \"kubernetes.io/projected/5600eccf-6390-45ff-a8e3-7f72d9327145-kube-api-access-sd8hx\") pod \"5600eccf-6390-45ff-a8e3-7f72d9327145\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153261 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-catalog-content\") pod \"55880057-105b-4a56-a0db-9e9aaff70a4c\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153294 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-catalog-content\") pod \"5600eccf-6390-45ff-a8e3-7f72d9327145\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153342 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-trusted-ca\") pod \"37480fb1-03b9-4913-a336-c18363b1e85e\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153367 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-utilities\") pod \"55880057-105b-4a56-a0db-9e9aaff70a4c\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153443 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-catalog-content\") pod \"830221cf-75b5-4942-aa0b-3cb82d9e0222\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153472 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dxfl\" (UniqueName: \"kubernetes.io/projected/37480fb1-03b9-4913-a336-c18363b1e85e-kube-api-access-9dxfl\") pod \"37480fb1-03b9-4913-a336-c18363b1e85e\" (UID: \"37480fb1-03b9-4913-a336-c18363b1e85e\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153496 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sl86\" (UniqueName: \"kubernetes.io/projected/830221cf-75b5-4942-aa0b-3cb82d9e0222-kube-api-access-7sl86\") pod \"830221cf-75b5-4942-aa0b-3cb82d9e0222\" (UID: \"830221cf-75b5-4942-aa0b-3cb82d9e0222\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153515 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-utilities\") pod \"5600eccf-6390-45ff-a8e3-7f72d9327145\" (UID: \"5600eccf-6390-45ff-a8e3-7f72d9327145\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153538 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b99lm\" (UniqueName: \"kubernetes.io/projected/55880057-105b-4a56-a0db-9e9aaff70a4c-kube-api-access-b99lm\") pod \"55880057-105b-4a56-a0db-9e9aaff70a4c\" (UID: \"55880057-105b-4a56-a0db-9e9aaff70a4c\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.153568 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-catalog-content\") pod \"dff8b002-18bd-499b-92d2-f739d29131e8\" (UID: \"dff8b002-18bd-499b-92d2-f739d29131e8\") " Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.154384 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-utilities" (OuterVolumeSpecName: "utilities") pod "dff8b002-18bd-499b-92d2-f739d29131e8" (UID: "dff8b002-18bd-499b-92d2-f739d29131e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.154707 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-utilities" (OuterVolumeSpecName: "utilities") pod "55880057-105b-4a56-a0db-9e9aaff70a4c" (UID: "55880057-105b-4a56-a0db-9e9aaff70a4c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.155366 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "37480fb1-03b9-4913-a336-c18363b1e85e" (UID: "37480fb1-03b9-4913-a336-c18363b1e85e"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.155581 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-95g48"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.157123 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-utilities" (OuterVolumeSpecName: "utilities") pod "5600eccf-6390-45ff-a8e3-7f72d9327145" (UID: "5600eccf-6390-45ff-a8e3-7f72d9327145"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.158074 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5600eccf-6390-45ff-a8e3-7f72d9327145-kube-api-access-sd8hx" (OuterVolumeSpecName: "kube-api-access-sd8hx") pod "5600eccf-6390-45ff-a8e3-7f72d9327145" (UID: "5600eccf-6390-45ff-a8e3-7f72d9327145"). InnerVolumeSpecName "kube-api-access-sd8hx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.159033 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/830221cf-75b5-4942-aa0b-3cb82d9e0222-kube-api-access-7sl86" (OuterVolumeSpecName: "kube-api-access-7sl86") pod "830221cf-75b5-4942-aa0b-3cb82d9e0222" (UID: "830221cf-75b5-4942-aa0b-3cb82d9e0222"). InnerVolumeSpecName "kube-api-access-7sl86". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.160578 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55880057-105b-4a56-a0db-9e9aaff70a4c-kube-api-access-b99lm" (OuterVolumeSpecName: "kube-api-access-b99lm") pod "55880057-105b-4a56-a0db-9e9aaff70a4c" (UID: "55880057-105b-4a56-a0db-9e9aaff70a4c"). InnerVolumeSpecName "kube-api-access-b99lm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.160659 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37480fb1-03b9-4913-a336-c18363b1e85e-kube-api-access-9dxfl" (OuterVolumeSpecName: "kube-api-access-9dxfl") pod "37480fb1-03b9-4913-a336-c18363b1e85e" (UID: "37480fb1-03b9-4913-a336-c18363b1e85e"). InnerVolumeSpecName "kube-api-access-9dxfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.165370 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dff8b002-18bd-499b-92d2-f739d29131e8-kube-api-access-ttskp" (OuterVolumeSpecName: "kube-api-access-ttskp") pod "dff8b002-18bd-499b-92d2-f739d29131e8" (UID: "dff8b002-18bd-499b-92d2-f739d29131e8"). InnerVolumeSpecName "kube-api-access-ttskp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.165471 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-utilities" (OuterVolumeSpecName: "utilities") pod "830221cf-75b5-4942-aa0b-3cb82d9e0222" (UID: "830221cf-75b5-4942-aa0b-3cb82d9e0222"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.167995 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "37480fb1-03b9-4913-a336-c18363b1e85e" (UID: "37480fb1-03b9-4913-a336-c18363b1e85e"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.177023 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55880057-105b-4a56-a0db-9e9aaff70a4c" (UID: "55880057-105b-4a56-a0db-9e9aaff70a4c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.198919 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "830221cf-75b5-4942-aa0b-3cb82d9e0222" (UID: "830221cf-75b5-4942-aa0b-3cb82d9e0222"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.212721 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dff8b002-18bd-499b-92d2-f739d29131e8" (UID: "dff8b002-18bd-499b-92d2-f739d29131e8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.249476 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5600eccf-6390-45ff-a8e3-7f72d9327145" (UID: "5600eccf-6390-45ff-a8e3-7f72d9327145"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254511 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254550 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254602 4842 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254617 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254626 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dxfl\" (UniqueName: \"kubernetes.io/projected/37480fb1-03b9-4913-a336-c18363b1e85e-kube-api-access-9dxfl\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254634 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sl86\" (UniqueName: \"kubernetes.io/projected/830221cf-75b5-4942-aa0b-3cb82d9e0222-kube-api-access-7sl86\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254642 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5600eccf-6390-45ff-a8e3-7f72d9327145-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254650 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b99lm\" (UniqueName: \"kubernetes.io/projected/55880057-105b-4a56-a0db-9e9aaff70a4c-kube-api-access-b99lm\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254682 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254691 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttskp\" (UniqueName: \"kubernetes.io/projected/dff8b002-18bd-499b-92d2-f739d29131e8-kube-api-access-ttskp\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254700 4842 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/37480fb1-03b9-4913-a336-c18363b1e85e-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254710 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff8b002-18bd-499b-92d2-f739d29131e8-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254719 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/830221cf-75b5-4942-aa0b-3cb82d9e0222-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254727 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd8hx\" (UniqueName: \"kubernetes.io/projected/5600eccf-6390-45ff-a8e3-7f72d9327145-kube-api-access-sd8hx\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.254735 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55880057-105b-4a56-a0db-9e9aaff70a4c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.405486 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" event={"ID":"ce99883b-0503-48f7-9711-c7caaa523a00","Type":"ContainerStarted","Data":"fd1626eb34b9e9a09034f46dad8190949550347983a3c71a56e689006b552d58"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.405532 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" event={"ID":"ce99883b-0503-48f7-9711-c7caaa523a00","Type":"ContainerStarted","Data":"cb605b35a4f48b8b59191042c8eebb8e46c9af6c3f2b03f3a5bb47ef3877f5b1"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.407440 4842 generic.go:334] "Generic (PLEG): container finished" podID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerID="5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d" exitCode=0 Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.407490 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2479l" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.407519 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2479l" event={"ID":"55880057-105b-4a56-a0db-9e9aaff70a4c","Type":"ContainerDied","Data":"5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.407550 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2479l" event={"ID":"55880057-105b-4a56-a0db-9e9aaff70a4c","Type":"ContainerDied","Data":"2bc4f0a1eae90bb9dd8126d80140e109b7762c7c91fda304ff600730fbbe0317"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.407570 4842 scope.go:117] "RemoveContainer" containerID="5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.413119 4842 generic.go:334] "Generic (PLEG): container finished" podID="dff8b002-18bd-499b-92d2-f739d29131e8" containerID="7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf" exitCode=0 Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.413170 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rznqq" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.413197 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rznqq" event={"ID":"dff8b002-18bd-499b-92d2-f739d29131e8","Type":"ContainerDied","Data":"7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.413438 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rznqq" event={"ID":"dff8b002-18bd-499b-92d2-f739d29131e8","Type":"ContainerDied","Data":"7a030e8726637406a9bf7ee8254fb8dc53d7991fc4f4412ac845fde00478af3e"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.415907 4842 generic.go:334] "Generic (PLEG): container finished" podID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerID="dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6" exitCode=0 Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.415954 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254hd" event={"ID":"830221cf-75b5-4942-aa0b-3cb82d9e0222","Type":"ContainerDied","Data":"dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.415986 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254hd" event={"ID":"830221cf-75b5-4942-aa0b-3cb82d9e0222","Type":"ContainerDied","Data":"48bd50ed6491018dc06a96351eca8cbea088e04a84d0435a087700188b00420a"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.415935 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-254hd" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.417883 4842 generic.go:334] "Generic (PLEG): container finished" podID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerID="3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0" exitCode=0 Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.417982 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q2br9" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.418143 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2br9" event={"ID":"5600eccf-6390-45ff-a8e3-7f72d9327145","Type":"ContainerDied","Data":"3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.418184 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2br9" event={"ID":"5600eccf-6390-45ff-a8e3-7f72d9327145","Type":"ContainerDied","Data":"962f4e084dbd1bc11f5541402eeea74c0f496c05e06f7a288f3b9e4dd753d366"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.420407 4842 scope.go:117] "RemoveContainer" containerID="604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.420938 4842 generic.go:334] "Generic (PLEG): container finished" podID="37480fb1-03b9-4913-a336-c18363b1e85e" containerID="39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed" exitCode=0 Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.420974 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" event={"ID":"37480fb1-03b9-4913-a336-c18363b1e85e","Type":"ContainerDied","Data":"39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.420994 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" event={"ID":"37480fb1-03b9-4913-a336-c18363b1e85e","Type":"ContainerDied","Data":"96617cd32ea141498a62f5f2a9c71e998406aa89f57e1d4387405c7c4e790b37"} Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.421055 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wgl8" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.423766 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" podStartSLOduration=1.4237452990000001 podStartE2EDuration="1.423745299s" podCreationTimestamp="2025-11-11 13:36:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:36:02.422721005 +0000 UTC m=+373.083010634" watchObservedRunningTime="2025-11-11 13:36:02.423745299 +0000 UTC m=+373.084034918" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.440532 4842 scope.go:117] "RemoveContainer" containerID="6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.460667 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2479l"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.462842 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2479l"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.472257 4842 scope.go:117] "RemoveContainer" containerID="5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.472802 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d\": container with ID starting with 5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d not found: ID does not exist" containerID="5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.472855 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d"} err="failed to get container status \"5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d\": rpc error: code = NotFound desc = could not find container \"5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d\": container with ID starting with 5d83176c3d4b5cc939ee70ceb4e4fabb95fddb011790cd823c50ef1536d70b9d not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.472891 4842 scope.go:117] "RemoveContainer" containerID="604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.474463 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343\": container with ID starting with 604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343 not found: ID does not exist" containerID="604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.474502 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343"} err="failed to get container status \"604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343\": rpc error: code = NotFound desc = could not find container \"604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343\": 
container with ID starting with 604e949e7cfadc30f5007da088fc3cfa12244962a3aa975dfe13afa8e748e343 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.474530 4842 scope.go:117] "RemoveContainer" containerID="6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.474808 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76\": container with ID starting with 6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76 not found: ID does not exist" containerID="6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.474832 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76"} err="failed to get container status \"6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76\": rpc error: code = NotFound desc = could not find container \"6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76\": container with ID starting with 6d9e4c1e017b31a660c7d29aa38918ec02bee1ee13ac5a60474951907317ec76 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.474848 4842 scope.go:117] "RemoveContainer" containerID="7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.481309 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q2br9"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.494873 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q2br9"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.504160 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-254hd"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.507364 4842 scope.go:117] "RemoveContainer" containerID="b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.513616 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-254hd"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.523376 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rznqq"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.530586 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rznqq"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.535165 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wgl8"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.537782 4842 scope.go:117] "RemoveContainer" containerID="0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.548919 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wgl8"] Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.554683 4842 scope.go:117] "RemoveContainer" containerID="7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.555454 4842 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf\": container with ID starting with 7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf not found: ID does not exist" containerID="7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.555497 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf"} err="failed to get container status \"7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf\": rpc error: code = NotFound desc = could not find container \"7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf\": container with ID starting with 7287e176c2a234adcc8412689cca95018f3e18532ec75723970f61e667307bdf not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.555527 4842 scope.go:117] "RemoveContainer" containerID="b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.555793 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07\": container with ID starting with b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07 not found: ID does not exist" containerID="b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.555817 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07"} err="failed to get container status \"b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07\": rpc error: code = NotFound desc = could not find container \"b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07\": container with ID starting with b0dba64cd0fb727b721f79e8a35d47806eb3e1ba5c951d823f80eb4a7bb55e07 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.555833 4842 scope.go:117] "RemoveContainer" containerID="0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.556125 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c\": container with ID starting with 0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c not found: ID does not exist" containerID="0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.556167 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c"} err="failed to get container status \"0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c\": rpc error: code = NotFound desc = could not find container \"0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c\": container with ID starting with 0dea97aa67a239c0609931fbb2212e5cb91e9af2edfe4d255fc88574533dfc7c not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.556199 4842 scope.go:117] 
"RemoveContainer" containerID="dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.568403 4842 scope.go:117] "RemoveContainer" containerID="2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.583256 4842 scope.go:117] "RemoveContainer" containerID="0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.598917 4842 scope.go:117] "RemoveContainer" containerID="dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.599480 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6\": container with ID starting with dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6 not found: ID does not exist" containerID="dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.599586 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6"} err="failed to get container status \"dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6\": rpc error: code = NotFound desc = could not find container \"dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6\": container with ID starting with dd9c4996b1c8b166621802dc31c5e2f78bca0b0e3193bfbe5da70f194f255fe6 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.599693 4842 scope.go:117] "RemoveContainer" containerID="2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.599957 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07\": container with ID starting with 2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07 not found: ID does not exist" containerID="2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.600061 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07"} err="failed to get container status \"2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07\": rpc error: code = NotFound desc = could not find container \"2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07\": container with ID starting with 2dbdc724fc4dd306cf50711b93304a18283daf52819155d232712f3cf902bb07 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.600220 4842 scope.go:117] "RemoveContainer" containerID="0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.600628 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610\": container with ID starting with 0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610 not found: ID does not exist" 
containerID="0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.600673 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610"} err="failed to get container status \"0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610\": rpc error: code = NotFound desc = could not find container \"0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610\": container with ID starting with 0597b54c65a009cf4bd81f15d55c30540f77f78e2bd0a99fbb1dbf0c6b9d7610 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.600708 4842 scope.go:117] "RemoveContainer" containerID="3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.621197 4842 scope.go:117] "RemoveContainer" containerID="88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.635811 4842 scope.go:117] "RemoveContainer" containerID="f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.682864 4842 scope.go:117] "RemoveContainer" containerID="3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.683367 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0\": container with ID starting with 3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0 not found: ID does not exist" containerID="3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.683409 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0"} err="failed to get container status \"3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0\": rpc error: code = NotFound desc = could not find container \"3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0\": container with ID starting with 3f3a156fa3c2d97c8e015ea6aaccf8afdde53c0c86bb03c8d0570982bce633b0 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.683435 4842 scope.go:117] "RemoveContainer" containerID="88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.683807 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8\": container with ID starting with 88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8 not found: ID does not exist" containerID="88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.683904 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8"} err="failed to get container status \"88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8\": rpc error: code = NotFound desc = could not find container 
\"88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8\": container with ID starting with 88f7ab2ec41895edbb4850b46177bfb07b54f223ec760dba9108a2cffb917ff8 not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.683984 4842 scope.go:117] "RemoveContainer" containerID="f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.684318 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a\": container with ID starting with f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a not found: ID does not exist" containerID="f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.684338 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a"} err="failed to get container status \"f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a\": rpc error: code = NotFound desc = could not find container \"f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a\": container with ID starting with f1936101629111cda1a9ff60591b12d3b0fb3cd1b1c83a090287383a181f8d0a not found: ID does not exist" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.684352 4842 scope.go:117] "RemoveContainer" containerID="39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.698792 4842 scope.go:117] "RemoveContainer" containerID="39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed" Nov 11 13:36:02 crc kubenswrapper[4842]: E1111 13:36:02.699363 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed\": container with ID starting with 39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed not found: ID does not exist" containerID="39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed" Nov 11 13:36:02 crc kubenswrapper[4842]: I1111 13:36:02.699412 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed"} err="failed to get container status \"39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed\": rpc error: code = NotFound desc = could not find container \"39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed\": container with ID starting with 39c17c34423e009ef974d0c3e0d24ba73436a1d8811a3a0ed184b6e7ba358eed not found: ID does not exist" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.433308 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.437063 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716070 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4gcmx"] Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716308 4842 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716327 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716342 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716349 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716361 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716369 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716381 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716388 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716412 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716423 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716434 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716442 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716452 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716461 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716471 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716478 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716490 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716497 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716507 4842 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716514 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716522 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.716529 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="extract-content" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.716539 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.718936 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="extract-utilities" Nov 11 13:36:03 crc kubenswrapper[4842]: E1111 13:36:03.718973 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.718984 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.719145 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" containerName="marketplace-operator" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.719165 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.719180 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.719190 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.719201 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" containerName="registry-server" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.719911 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.721239 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4gcmx"] Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.721888 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.784111 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-catalog-content\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.784229 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r5jt\" (UniqueName: \"kubernetes.io/projected/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-kube-api-access-5r5jt\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.784291 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-utilities\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.884919 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-utilities\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.885285 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-catalog-content\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.885331 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r5jt\" (UniqueName: \"kubernetes.io/projected/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-kube-api-access-5r5jt\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.885671 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-utilities\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.885910 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-catalog-content\") pod \"redhat-marketplace-4gcmx\" (UID: 
\"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.904169 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r5jt\" (UniqueName: \"kubernetes.io/projected/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-kube-api-access-5r5jt\") pod \"redhat-marketplace-4gcmx\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.914030 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cthhp"] Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.915050 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.918227 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 11 13:36:03 crc kubenswrapper[4842]: I1111 13:36:03.922564 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cthhp"] Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.042881 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.066450 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37480fb1-03b9-4913-a336-c18363b1e85e" path="/var/lib/kubelet/pods/37480fb1-03b9-4913-a336-c18363b1e85e/volumes" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.067156 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55880057-105b-4a56-a0db-9e9aaff70a4c" path="/var/lib/kubelet/pods/55880057-105b-4a56-a0db-9e9aaff70a4c/volumes" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.067981 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5600eccf-6390-45ff-a8e3-7f72d9327145" path="/var/lib/kubelet/pods/5600eccf-6390-45ff-a8e3-7f72d9327145/volumes" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.069347 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="830221cf-75b5-4942-aa0b-3cb82d9e0222" path="/var/lib/kubelet/pods/830221cf-75b5-4942-aa0b-3cb82d9e0222/volumes" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.070075 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dff8b002-18bd-499b-92d2-f739d29131e8" path="/var/lib/kubelet/pods/dff8b002-18bd-499b-92d2-f739d29131e8/volumes" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.090763 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vpvp\" (UniqueName: \"kubernetes.io/projected/fd55de82-0278-4627-870c-9b09edbca3d5-kube-api-access-8vpvp\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.090851 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-utilities\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.090907 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-catalog-content\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.192114 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-utilities\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.192168 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-catalog-content\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.192218 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vpvp\" (UniqueName: \"kubernetes.io/projected/fd55de82-0278-4627-870c-9b09edbca3d5-kube-api-access-8vpvp\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.192640 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-utilities\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.192859 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-catalog-content\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.210716 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vpvp\" (UniqueName: \"kubernetes.io/projected/fd55de82-0278-4627-870c-9b09edbca3d5-kube-api-access-8vpvp\") pod \"redhat-operators-cthhp\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.241690 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.413767 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4gcmx"] Nov 11 13:36:04 crc kubenswrapper[4842]: W1111 13:36:04.420349 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3601123_3a84_4cc7_9b1a_2b56cffb00a6.slice/crio-5e70f65b5ed264a9f9dc2365c8912f3ebde3ec12c6d063ed4025f649095499c0 WatchSource:0}: Error finding container 5e70f65b5ed264a9f9dc2365c8912f3ebde3ec12c6d063ed4025f649095499c0: Status 404 returned error can't find the container with id 5e70f65b5ed264a9f9dc2365c8912f3ebde3ec12c6d063ed4025f649095499c0 Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.439050 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4gcmx" event={"ID":"e3601123-3a84-4cc7-9b1a-2b56cffb00a6","Type":"ContainerStarted","Data":"5e70f65b5ed264a9f9dc2365c8912f3ebde3ec12c6d063ed4025f649095499c0"} Nov 11 13:36:04 crc kubenswrapper[4842]: I1111 13:36:04.597637 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cthhp"] Nov 11 13:36:04 crc kubenswrapper[4842]: W1111 13:36:04.612890 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd55de82_0278_4627_870c_9b09edbca3d5.slice/crio-a46b7e9b711b9bbaf40760ee7a50091598453a0e384e519e031b8df12c9aa63c WatchSource:0}: Error finding container a46b7e9b711b9bbaf40760ee7a50091598453a0e384e519e031b8df12c9aa63c: Status 404 returned error can't find the container with id a46b7e9b711b9bbaf40760ee7a50091598453a0e384e519e031b8df12c9aa63c Nov 11 13:36:05 crc kubenswrapper[4842]: I1111 13:36:05.447155 4842 generic.go:334] "Generic (PLEG): container finished" podID="fd55de82-0278-4627-870c-9b09edbca3d5" containerID="6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370" exitCode=0 Nov 11 13:36:05 crc kubenswrapper[4842]: I1111 13:36:05.448327 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cthhp" event={"ID":"fd55de82-0278-4627-870c-9b09edbca3d5","Type":"ContainerDied","Data":"6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370"} Nov 11 13:36:05 crc kubenswrapper[4842]: I1111 13:36:05.448446 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cthhp" event={"ID":"fd55de82-0278-4627-870c-9b09edbca3d5","Type":"ContainerStarted","Data":"a46b7e9b711b9bbaf40760ee7a50091598453a0e384e519e031b8df12c9aa63c"} Nov 11 13:36:05 crc kubenswrapper[4842]: I1111 13:36:05.453847 4842 generic.go:334] "Generic (PLEG): container finished" podID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerID="10b9ee229f39dc18eb09b078fbddcc08016c7d90cbfc87d19ba5f8ffc46bdb5b" exitCode=0 Nov 11 13:36:05 crc kubenswrapper[4842]: I1111 13:36:05.453941 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4gcmx" event={"ID":"e3601123-3a84-4cc7-9b1a-2b56cffb00a6","Type":"ContainerDied","Data":"10b9ee229f39dc18eb09b078fbddcc08016c7d90cbfc87d19ba5f8ffc46bdb5b"} Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.108723 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5frqb"] Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.110014 4842 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.111879 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.118416 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5frqb"] Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.119004 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-utilities\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.119025 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-catalog-content\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.119044 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkm66\" (UniqueName: \"kubernetes.io/projected/fa5e095a-f74a-470d-9070-769a25b3299d-kube-api-access-vkm66\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.219869 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-utilities\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.220213 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-catalog-content\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.220242 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkm66\" (UniqueName: \"kubernetes.io/projected/fa5e095a-f74a-470d-9070-769a25b3299d-kube-api-access-vkm66\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.220617 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-utilities\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.220638 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-catalog-content\") pod 
\"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.240731 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkm66\" (UniqueName: \"kubernetes.io/projected/fa5e095a-f74a-470d-9070-769a25b3299d-kube-api-access-vkm66\") pod \"community-operators-5frqb\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.313412 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nk726"] Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.314713 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.316638 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.319236 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nk726"] Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.421940 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-utilities\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.422000 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-catalog-content\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.422159 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d7vb\" (UniqueName: \"kubernetes.io/projected/4b47536f-3d40-4117-bb3c-b7751e6bcc16-kube-api-access-4d7vb\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.460379 4842 generic.go:334] "Generic (PLEG): container finished" podID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerID="d84734d34147f77c646766a40a755527dc9fe6f9d57fe133aff2b707396859c8" exitCode=0 Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.460440 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4gcmx" event={"ID":"e3601123-3a84-4cc7-9b1a-2b56cffb00a6","Type":"ContainerDied","Data":"d84734d34147f77c646766a40a755527dc9fe6f9d57fe133aff2b707396859c8"} Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.460731 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.465972 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cthhp" event={"ID":"fd55de82-0278-4627-870c-9b09edbca3d5","Type":"ContainerStarted","Data":"d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4"} Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.522895 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-catalog-content\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.522992 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d7vb\" (UniqueName: \"kubernetes.io/projected/4b47536f-3d40-4117-bb3c-b7751e6bcc16-kube-api-access-4d7vb\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.523019 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-utilities\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.523479 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-utilities\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.524269 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-catalog-content\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.542974 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d7vb\" (UniqueName: \"kubernetes.io/projected/4b47536f-3d40-4117-bb3c-b7751e6bcc16-kube-api-access-4d7vb\") pod \"certified-operators-nk726\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.738626 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:06 crc kubenswrapper[4842]: I1111 13:36:06.849563 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5frqb"] Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.111714 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nk726"] Nov 11 13:36:07 crc kubenswrapper[4842]: W1111 13:36:07.114447 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b47536f_3d40_4117_bb3c_b7751e6bcc16.slice/crio-eab07e99933087250f04c1b1be72ae167d1692d9680ce1804b4b14bd9f5a89d5 WatchSource:0}: Error finding container eab07e99933087250f04c1b1be72ae167d1692d9680ce1804b4b14bd9f5a89d5: Status 404 returned error can't find the container with id eab07e99933087250f04c1b1be72ae167d1692d9680ce1804b4b14bd9f5a89d5 Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.471695 4842 generic.go:334] "Generic (PLEG): container finished" podID="fd55de82-0278-4627-870c-9b09edbca3d5" containerID="d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4" exitCode=0 Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.471759 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cthhp" event={"ID":"fd55de82-0278-4627-870c-9b09edbca3d5","Type":"ContainerDied","Data":"d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4"} Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.472809 4842 generic.go:334] "Generic (PLEG): container finished" podID="fa5e095a-f74a-470d-9070-769a25b3299d" containerID="5ccefd7a742b43414add59b00f1abfe6ae076a59aa7b2f571d0ee5cbc631012e" exitCode=0 Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.472864 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5frqb" event={"ID":"fa5e095a-f74a-470d-9070-769a25b3299d","Type":"ContainerDied","Data":"5ccefd7a742b43414add59b00f1abfe6ae076a59aa7b2f571d0ee5cbc631012e"} Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.472880 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5frqb" event={"ID":"fa5e095a-f74a-470d-9070-769a25b3299d","Type":"ContainerStarted","Data":"519a47887aed62c9ac8c33b41c69378237ddbd92e1096034d7354ab58c5f7b3b"} Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.479157 4842 generic.go:334] "Generic (PLEG): container finished" podID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerID="e8307f0c7a1d9dfa1498264a9f0bc93d72a8e95914a2eec3f21d3dd25d96e480" exitCode=0 Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.479201 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nk726" event={"ID":"4b47536f-3d40-4117-bb3c-b7751e6bcc16","Type":"ContainerDied","Data":"e8307f0c7a1d9dfa1498264a9f0bc93d72a8e95914a2eec3f21d3dd25d96e480"} Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.479255 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nk726" event={"ID":"4b47536f-3d40-4117-bb3c-b7751e6bcc16","Type":"ContainerStarted","Data":"eab07e99933087250f04c1b1be72ae167d1692d9680ce1804b4b14bd9f5a89d5"} Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.482775 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4gcmx" 
event={"ID":"e3601123-3a84-4cc7-9b1a-2b56cffb00a6","Type":"ContainerStarted","Data":"9fe3582ac4652fc88bf16a93154f9f2fe15cb2d184f9fb3c79008775907ac2cd"} Nov 11 13:36:07 crc kubenswrapper[4842]: I1111 13:36:07.545324 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4gcmx" podStartSLOduration=2.7462175220000002 podStartE2EDuration="4.545303056s" podCreationTimestamp="2025-11-11 13:36:03 +0000 UTC" firstStartedPulling="2025-11-11 13:36:05.455355519 +0000 UTC m=+376.115645138" lastFinishedPulling="2025-11-11 13:36:07.254441053 +0000 UTC m=+377.914730672" observedRunningTime="2025-11-11 13:36:07.542554033 +0000 UTC m=+378.202843662" watchObservedRunningTime="2025-11-11 13:36:07.545303056 +0000 UTC m=+378.205592675" Nov 11 13:36:08 crc kubenswrapper[4842]: I1111 13:36:08.490487 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cthhp" event={"ID":"fd55de82-0278-4627-870c-9b09edbca3d5","Type":"ContainerStarted","Data":"db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf"} Nov 11 13:36:08 crc kubenswrapper[4842]: I1111 13:36:08.500354 4842 generic.go:334] "Generic (PLEG): container finished" podID="fa5e095a-f74a-470d-9070-769a25b3299d" containerID="269eeab4b52f05b6e766111e6764cc3dfbce241a4c6ede6e8477efab07efd25c" exitCode=0 Nov 11 13:36:08 crc kubenswrapper[4842]: I1111 13:36:08.500436 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5frqb" event={"ID":"fa5e095a-f74a-470d-9070-769a25b3299d","Type":"ContainerDied","Data":"269eeab4b52f05b6e766111e6764cc3dfbce241a4c6ede6e8477efab07efd25c"} Nov 11 13:36:08 crc kubenswrapper[4842]: I1111 13:36:08.502859 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nk726" event={"ID":"4b47536f-3d40-4117-bb3c-b7751e6bcc16","Type":"ContainerStarted","Data":"eb58a7e758f32f36e5e91036e13dafb06b66e42219be7969f2944cecda66fac6"} Nov 11 13:36:08 crc kubenswrapper[4842]: I1111 13:36:08.530208 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cthhp" podStartSLOduration=3.107394163 podStartE2EDuration="5.530191216s" podCreationTimestamp="2025-11-11 13:36:03 +0000 UTC" firstStartedPulling="2025-11-11 13:36:05.451122487 +0000 UTC m=+376.111412096" lastFinishedPulling="2025-11-11 13:36:07.87391953 +0000 UTC m=+378.534209149" observedRunningTime="2025-11-11 13:36:08.509521989 +0000 UTC m=+379.169811608" watchObservedRunningTime="2025-11-11 13:36:08.530191216 +0000 UTC m=+379.190480835" Nov 11 13:36:09 crc kubenswrapper[4842]: I1111 13:36:09.528309 4842 generic.go:334] "Generic (PLEG): container finished" podID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerID="eb58a7e758f32f36e5e91036e13dafb06b66e42219be7969f2944cecda66fac6" exitCode=0 Nov 11 13:36:09 crc kubenswrapper[4842]: I1111 13:36:09.528472 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nk726" event={"ID":"4b47536f-3d40-4117-bb3c-b7751e6bcc16","Type":"ContainerDied","Data":"eb58a7e758f32f36e5e91036e13dafb06b66e42219be7969f2944cecda66fac6"} Nov 11 13:36:09 crc kubenswrapper[4842]: I1111 13:36:09.541950 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5frqb" event={"ID":"fa5e095a-f74a-470d-9070-769a25b3299d","Type":"ContainerStarted","Data":"403583adc84de5b663c09b6fc723fcedeb9a9abe7a22ef9dd976b15b437c3a63"} Nov 11 13:36:09 
crc kubenswrapper[4842]: I1111 13:36:09.577821 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5frqb" podStartSLOduration=2.158484451 podStartE2EDuration="3.577797807s" podCreationTimestamp="2025-11-11 13:36:06 +0000 UTC" firstStartedPulling="2025-11-11 13:36:07.476883012 +0000 UTC m=+378.137172631" lastFinishedPulling="2025-11-11 13:36:08.896196368 +0000 UTC m=+379.556485987" observedRunningTime="2025-11-11 13:36:09.574247078 +0000 UTC m=+380.234536697" watchObservedRunningTime="2025-11-11 13:36:09.577797807 +0000 UTC m=+380.238087426" Nov 11 13:36:11 crc kubenswrapper[4842]: I1111 13:36:11.554566 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nk726" event={"ID":"4b47536f-3d40-4117-bb3c-b7751e6bcc16","Type":"ContainerStarted","Data":"73398d067055fe2e71719fa59514d9ebabe150c55ee2d5f99440103705a05797"} Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.043920 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.044259 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.082464 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.103205 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nk726" podStartSLOduration=5.6206724789999996 podStartE2EDuration="8.103185891s" podCreationTimestamp="2025-11-11 13:36:06 +0000 UTC" firstStartedPulling="2025-11-11 13:36:07.480285167 +0000 UTC m=+378.140574786" lastFinishedPulling="2025-11-11 13:36:09.962798579 +0000 UTC m=+380.623088198" observedRunningTime="2025-11-11 13:36:11.573414417 +0000 UTC m=+382.233704046" watchObservedRunningTime="2025-11-11 13:36:14.103185891 +0000 UTC m=+384.763475510" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.242420 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.242470 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.277741 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.609195 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.611071 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.961491 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:36:14 crc kubenswrapper[4842]: I1111 13:36:14.961571 4842 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:36:16 crc kubenswrapper[4842]: I1111 13:36:16.461166 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:16 crc kubenswrapper[4842]: I1111 13:36:16.461266 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:16 crc kubenswrapper[4842]: I1111 13:36:16.502579 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:16 crc kubenswrapper[4842]: I1111 13:36:16.621962 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:36:16 crc kubenswrapper[4842]: I1111 13:36:16.739019 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:16 crc kubenswrapper[4842]: I1111 13:36:16.739072 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:16 crc kubenswrapper[4842]: I1111 13:36:16.777801 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:17 crc kubenswrapper[4842]: I1111 13:36:17.622530 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:36:44 crc kubenswrapper[4842]: I1111 13:36:44.961482 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:36:44 crc kubenswrapper[4842]: I1111 13:36:44.962087 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:36:44 crc kubenswrapper[4842]: I1111 13:36:44.962169 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:36:44 crc kubenswrapper[4842]: I1111 13:36:44.963135 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"516a8b13b309b25528f80c0f33b222ce57df00d371d2f4ed05bbeb810d1c667d"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 13:36:44 crc kubenswrapper[4842]: I1111 13:36:44.963205 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" 
containerID="cri-o://516a8b13b309b25528f80c0f33b222ce57df00d371d2f4ed05bbeb810d1c667d" gracePeriod=600 Nov 11 13:36:45 crc kubenswrapper[4842]: I1111 13:36:45.733251 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="516a8b13b309b25528f80c0f33b222ce57df00d371d2f4ed05bbeb810d1c667d" exitCode=0 Nov 11 13:36:45 crc kubenswrapper[4842]: I1111 13:36:45.733369 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"516a8b13b309b25528f80c0f33b222ce57df00d371d2f4ed05bbeb810d1c667d"} Nov 11 13:36:45 crc kubenswrapper[4842]: I1111 13:36:45.733618 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"c1ca4374e95caaa596e3c81b45f2a5043729a278a200eab109186bc210767a07"} Nov 11 13:36:45 crc kubenswrapper[4842]: I1111 13:36:45.733648 4842 scope.go:117] "RemoveContainer" containerID="bd70be3175eb0174945b645b3f7188ec86c4b78c7c0b47b8d73a0493e5918e3a" Nov 11 13:37:59 crc kubenswrapper[4842]: I1111 13:37:59.965636 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xzhtt"] Nov 11 13:37:59 crc kubenswrapper[4842]: I1111 13:37:59.966734 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:37:59 crc kubenswrapper[4842]: I1111 13:37:59.983606 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xzhtt"] Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.108026 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hf9j\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-kube-api-access-4hf9j\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.108245 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-bound-sa-token\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.108471 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.108726 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-registry-tls\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc 
kubenswrapper[4842]: I1111 13:38:00.108801 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/97355381-7860-4f2f-94a6-b852c10c4d04-trusted-ca\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.108997 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/97355381-7860-4f2f-94a6-b852c10c4d04-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.109041 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/97355381-7860-4f2f-94a6-b852c10c4d04-registry-certificates\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.109069 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/97355381-7860-4f2f-94a6-b852c10c4d04-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.134974 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.210815 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/97355381-7860-4f2f-94a6-b852c10c4d04-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.210858 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/97355381-7860-4f2f-94a6-b852c10c4d04-registry-certificates\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.210877 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/97355381-7860-4f2f-94a6-b852c10c4d04-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.210902 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-4hf9j\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-kube-api-access-4hf9j\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.210920 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-bound-sa-token\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.210965 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-registry-tls\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.210984 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/97355381-7860-4f2f-94a6-b852c10c4d04-trusted-ca\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.211687 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/97355381-7860-4f2f-94a6-b852c10c4d04-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.212406 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/97355381-7860-4f2f-94a6-b852c10c4d04-trusted-ca\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.212665 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/97355381-7860-4f2f-94a6-b852c10c4d04-registry-certificates\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.217258 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/97355381-7860-4f2f-94a6-b852c10c4d04-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.221271 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-registry-tls\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.225679 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hf9j\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-kube-api-access-4hf9j\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.228002 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/97355381-7860-4f2f-94a6-b852c10c4d04-bound-sa-token\") pod \"image-registry-66df7c8f76-xzhtt\" (UID: \"97355381-7860-4f2f-94a6-b852c10c4d04\") " pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.282584 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:00 crc kubenswrapper[4842]: I1111 13:38:00.468480 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xzhtt"] Nov 11 13:38:00 crc kubenswrapper[4842]: W1111 13:38:00.477508 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97355381_7860_4f2f_94a6_b852c10c4d04.slice/crio-5bdabd8852e81cce0ba3aa2a59bf7f28f7cba7a56e952820109b2b27690c6192 WatchSource:0}: Error finding container 5bdabd8852e81cce0ba3aa2a59bf7f28f7cba7a56e952820109b2b27690c6192: Status 404 returned error can't find the container with id 5bdabd8852e81cce0ba3aa2a59bf7f28f7cba7a56e952820109b2b27690c6192 Nov 11 13:38:01 crc kubenswrapper[4842]: I1111 13:38:01.171515 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" event={"ID":"97355381-7860-4f2f-94a6-b852c10c4d04","Type":"ContainerStarted","Data":"781306ee0ba748bd250fed5fdfbcfc1f807541bcfbbb3fe0fa0295612337b9ac"} Nov 11 13:38:01 crc kubenswrapper[4842]: I1111 13:38:01.171864 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" event={"ID":"97355381-7860-4f2f-94a6-b852c10c4d04","Type":"ContainerStarted","Data":"5bdabd8852e81cce0ba3aa2a59bf7f28f7cba7a56e952820109b2b27690c6192"} Nov 11 13:38:01 crc kubenswrapper[4842]: I1111 13:38:01.171884 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:01 crc kubenswrapper[4842]: I1111 13:38:01.189796 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" podStartSLOduration=2.189774694 podStartE2EDuration="2.189774694s" podCreationTimestamp="2025-11-11 13:37:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:38:01.187914714 +0000 UTC m=+491.848204333" watchObservedRunningTime="2025-11-11 13:38:01.189774694 +0000 UTC m=+491.850064313" Nov 11 13:38:20 crc kubenswrapper[4842]: I1111 13:38:20.289832 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-xzhtt" Nov 11 13:38:20 crc kubenswrapper[4842]: I1111 13:38:20.365932 4842 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-d9vpf"] Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.399179 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" podUID="102c5e64-9ad0-4dc2-a6cf-bac2683db16e" containerName="registry" containerID="cri-o://d7f7f059592e3c64a0f2e7943f5fc96b38ecfe93bbf13b1b2db9123b4dcd1f13" gracePeriod=30 Nov 11 13:38:45 crc kubenswrapper[4842]: E1111 13:38:45.527832 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod102c5e64_9ad0_4dc2_a6cf_bac2683db16e.slice/crio-conmon-d7f7f059592e3c64a0f2e7943f5fc96b38ecfe93bbf13b1b2db9123b4dcd1f13.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod102c5e64_9ad0_4dc2_a6cf_bac2683db16e.slice/crio-d7f7f059592e3c64a0f2e7943f5fc96b38ecfe93bbf13b1b2db9123b4dcd1f13.scope\": RecentStats: unable to find data in memory cache]" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.588400 4842 generic.go:334] "Generic (PLEG): container finished" podID="102c5e64-9ad0-4dc2-a6cf-bac2683db16e" containerID="d7f7f059592e3c64a0f2e7943f5fc96b38ecfe93bbf13b1b2db9123b4dcd1f13" exitCode=0 Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.588444 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" event={"ID":"102c5e64-9ad0-4dc2-a6cf-bac2683db16e","Type":"ContainerDied","Data":"d7f7f059592e3c64a0f2e7943f5fc96b38ecfe93bbf13b1b2db9123b4dcd1f13"} Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.741349 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897324 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-ca-trust-extracted\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897377 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-tls\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897443 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-bound-sa-token\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897477 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-certificates\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897502 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-installation-pull-secrets\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897526 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-trusted-ca\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897640 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.897659 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxz7z\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-kube-api-access-zxz7z\") pod \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\" (UID: \"102c5e64-9ad0-4dc2-a6cf-bac2683db16e\") " Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.898674 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.899374 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.903242 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.903753 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.906925 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.909211 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-kube-api-access-zxz7z" (OuterVolumeSpecName: "kube-api-access-zxz7z") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "kube-api-access-zxz7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.910204 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.915042 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "102c5e64-9ad0-4dc2-a6cf-bac2683db16e" (UID: "102c5e64-9ad0-4dc2-a6cf-bac2683db16e"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.999300 4842 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.999332 4842 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.999345 4842 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.999353 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.999363 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxz7z\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-kube-api-access-zxz7z\") on node \"crc\" DevicePath \"\"" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.999372 4842 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 11 13:38:45 crc kubenswrapper[4842]: I1111 13:38:45.999381 4842 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/102c5e64-9ad0-4dc2-a6cf-bac2683db16e-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 11 13:38:46 crc kubenswrapper[4842]: I1111 13:38:46.595018 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" event={"ID":"102c5e64-9ad0-4dc2-a6cf-bac2683db16e","Type":"ContainerDied","Data":"d6c50388a069f5f0f7a553527dcde411d7c0820dab06a3b167e00fc64a845980"} Nov 11 13:38:46 crc kubenswrapper[4842]: I1111 13:38:46.596293 4842 scope.go:117] "RemoveContainer" containerID="d7f7f059592e3c64a0f2e7943f5fc96b38ecfe93bbf13b1b2db9123b4dcd1f13" Nov 11 13:38:46 crc kubenswrapper[4842]: I1111 13:38:46.595150 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-d9vpf" Nov 11 13:38:46 crc kubenswrapper[4842]: I1111 13:38:46.616469 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-d9vpf"] Nov 11 13:38:46 crc kubenswrapper[4842]: I1111 13:38:46.624521 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-d9vpf"] Nov 11 13:38:48 crc kubenswrapper[4842]: I1111 13:38:48.068278 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="102c5e64-9ad0-4dc2-a6cf-bac2683db16e" path="/var/lib/kubelet/pods/102c5e64-9ad0-4dc2-a6cf-bac2683db16e/volumes" Nov 11 13:39:14 crc kubenswrapper[4842]: I1111 13:39:14.961244 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:39:14 crc kubenswrapper[4842]: I1111 13:39:14.961708 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:39:44 crc kubenswrapper[4842]: I1111 13:39:44.961503 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:39:44 crc kubenswrapper[4842]: I1111 13:39:44.963276 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:40:14 crc kubenswrapper[4842]: I1111 13:40:14.961518 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:40:14 crc kubenswrapper[4842]: I1111 13:40:14.962050 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:40:14 crc kubenswrapper[4842]: I1111 13:40:14.962095 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:40:14 crc kubenswrapper[4842]: I1111 13:40:14.962697 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c1ca4374e95caaa596e3c81b45f2a5043729a278a200eab109186bc210767a07"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed 
liveness probe, will be restarted" Nov 11 13:40:14 crc kubenswrapper[4842]: I1111 13:40:14.962748 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://c1ca4374e95caaa596e3c81b45f2a5043729a278a200eab109186bc210767a07" gracePeriod=600 Nov 11 13:40:16 crc kubenswrapper[4842]: I1111 13:40:16.042716 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="c1ca4374e95caaa596e3c81b45f2a5043729a278a200eab109186bc210767a07" exitCode=0 Nov 11 13:40:16 crc kubenswrapper[4842]: I1111 13:40:16.042787 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"c1ca4374e95caaa596e3c81b45f2a5043729a278a200eab109186bc210767a07"} Nov 11 13:40:16 crc kubenswrapper[4842]: I1111 13:40:16.043077 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"bb8a87fd6e5dfffa2ad7daeb56800fbec36b7bea68de40f21af5fdc7ae975192"} Nov 11 13:40:16 crc kubenswrapper[4842]: I1111 13:40:16.043124 4842 scope.go:117] "RemoveContainer" containerID="516a8b13b309b25528f80c0f33b222ce57df00d371d2f4ed05bbeb810d1c667d" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.741912 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-5ks7v"] Nov 11 13:41:24 crc kubenswrapper[4842]: E1111 13:41:24.743358 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="102c5e64-9ad0-4dc2-a6cf-bac2683db16e" containerName="registry" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.743384 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="102c5e64-9ad0-4dc2-a6cf-bac2683db16e" containerName="registry" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.743588 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="102c5e64-9ad0-4dc2-a6cf-bac2683db16e" containerName="registry" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.744449 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.747540 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-5ks7v"] Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.754213 4842 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-99swz" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.754421 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.754556 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.756471 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-fwg9f"] Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.757186 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-fwg9f" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.759662 4842 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-c9bc4" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.762008 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-fwg9f"] Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.783738 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-djwf2"] Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.784573 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.790269 4842 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-dgfww" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.794885 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-djwf2"] Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.803419 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdzdk\" (UniqueName: \"kubernetes.io/projected/14c43b93-a309-434e-8379-c48dca27130f-kube-api-access-xdzdk\") pod \"cert-manager-5b446d88c5-fwg9f\" (UID: \"14c43b93-a309-434e-8379-c48dca27130f\") " pod="cert-manager/cert-manager-5b446d88c5-fwg9f" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.804553 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt5pw\" (UniqueName: \"kubernetes.io/projected/9548e4cc-cf32-4973-bb29-5525fee6d3e8-kube-api-access-dt5pw\") pod \"cert-manager-webhook-5655c58dd6-djwf2\" (UID: \"9548e4cc-cf32-4973-bb29-5525fee6d3e8\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.804609 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpdss\" (UniqueName: \"kubernetes.io/projected/32eb5634-bee2-4ae4-89ad-cad4e90a79d1-kube-api-access-rpdss\") pod \"cert-manager-cainjector-7f985d654d-5ks7v\" (UID: \"32eb5634-bee2-4ae4-89ad-cad4e90a79d1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.905809 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdzdk\" (UniqueName: \"kubernetes.io/projected/14c43b93-a309-434e-8379-c48dca27130f-kube-api-access-xdzdk\") pod \"cert-manager-5b446d88c5-fwg9f\" (UID: \"14c43b93-a309-434e-8379-c48dca27130f\") " pod="cert-manager/cert-manager-5b446d88c5-fwg9f" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.905866 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5pw\" (UniqueName: \"kubernetes.io/projected/9548e4cc-cf32-4973-bb29-5525fee6d3e8-kube-api-access-dt5pw\") pod \"cert-manager-webhook-5655c58dd6-djwf2\" (UID: \"9548e4cc-cf32-4973-bb29-5525fee6d3e8\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.905896 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpdss\" (UniqueName: 
\"kubernetes.io/projected/32eb5634-bee2-4ae4-89ad-cad4e90a79d1-kube-api-access-rpdss\") pod \"cert-manager-cainjector-7f985d654d-5ks7v\" (UID: \"32eb5634-bee2-4ae4-89ad-cad4e90a79d1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.923538 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdzdk\" (UniqueName: \"kubernetes.io/projected/14c43b93-a309-434e-8379-c48dca27130f-kube-api-access-xdzdk\") pod \"cert-manager-5b446d88c5-fwg9f\" (UID: \"14c43b93-a309-434e-8379-c48dca27130f\") " pod="cert-manager/cert-manager-5b446d88c5-fwg9f" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.923674 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt5pw\" (UniqueName: \"kubernetes.io/projected/9548e4cc-cf32-4973-bb29-5525fee6d3e8-kube-api-access-dt5pw\") pod \"cert-manager-webhook-5655c58dd6-djwf2\" (UID: \"9548e4cc-cf32-4973-bb29-5525fee6d3e8\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" Nov 11 13:41:24 crc kubenswrapper[4842]: I1111 13:41:24.924286 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpdss\" (UniqueName: \"kubernetes.io/projected/32eb5634-bee2-4ae4-89ad-cad4e90a79d1-kube-api-access-rpdss\") pod \"cert-manager-cainjector-7f985d654d-5ks7v\" (UID: \"32eb5634-bee2-4ae4-89ad-cad4e90a79d1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.068441 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.078200 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-fwg9f" Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.102827 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.262139 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-fwg9f"] Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.276730 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.302754 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-5ks7v"] Nov 11 13:41:25 crc kubenswrapper[4842]: W1111 13:41:25.312335 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32eb5634_bee2_4ae4_89ad_cad4e90a79d1.slice/crio-2066c267056caf495ef833bd3e57b0754144c178619f128c8eaa7af78de0ac23 WatchSource:0}: Error finding container 2066c267056caf495ef833bd3e57b0754144c178619f128c8eaa7af78de0ac23: Status 404 returned error can't find the container with id 2066c267056caf495ef833bd3e57b0754144c178619f128c8eaa7af78de0ac23 Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.334906 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-djwf2"] Nov 11 13:41:25 crc kubenswrapper[4842]: W1111 13:41:25.340276 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9548e4cc_cf32_4973_bb29_5525fee6d3e8.slice/crio-cdec33d397717a9523161b2724a21caec30a80a138c9b5685d98212c68cec586 WatchSource:0}: Error finding container cdec33d397717a9523161b2724a21caec30a80a138c9b5685d98212c68cec586: Status 404 returned error can't find the container with id cdec33d397717a9523161b2724a21caec30a80a138c9b5685d98212c68cec586 Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.375894 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-fwg9f" event={"ID":"14c43b93-a309-434e-8379-c48dca27130f","Type":"ContainerStarted","Data":"6c324075c456e2d65b41d3b5d67897e20355e5f8ff5134ced848a5c15255c1eb"} Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.376883 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" event={"ID":"9548e4cc-cf32-4973-bb29-5525fee6d3e8","Type":"ContainerStarted","Data":"cdec33d397717a9523161b2724a21caec30a80a138c9b5685d98212c68cec586"} Nov 11 13:41:25 crc kubenswrapper[4842]: I1111 13:41:25.378446 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" event={"ID":"32eb5634-bee2-4ae4-89ad-cad4e90a79d1","Type":"ContainerStarted","Data":"2066c267056caf495ef833bd3e57b0754144c178619f128c8eaa7af78de0ac23"} Nov 11 13:41:29 crc kubenswrapper[4842]: I1111 13:41:29.400219 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-fwg9f" event={"ID":"14c43b93-a309-434e-8379-c48dca27130f","Type":"ContainerStarted","Data":"7aaab63ad16b88892cac06b019e0799f6a9b1125c803e05d29d44457069962e6"} Nov 11 13:41:29 crc kubenswrapper[4842]: I1111 13:41:29.401468 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" event={"ID":"9548e4cc-cf32-4973-bb29-5525fee6d3e8","Type":"ContainerStarted","Data":"bb27b384e76b603cf5f105fa930ade1e28cdc08240f73a5df1da3662a78c0058"} Nov 11 13:41:29 crc kubenswrapper[4842]: I1111 13:41:29.402057 4842 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" Nov 11 13:41:29 crc kubenswrapper[4842]: I1111 13:41:29.403946 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" event={"ID":"32eb5634-bee2-4ae4-89ad-cad4e90a79d1","Type":"ContainerStarted","Data":"21664adffb032cbf3e606a4dced766a56a2d08ffbf370d65fd10fd91821ddf31"} Nov 11 13:41:29 crc kubenswrapper[4842]: I1111 13:41:29.417227 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-fwg9f" podStartSLOduration=2.42122952 podStartE2EDuration="5.417204769s" podCreationTimestamp="2025-11-11 13:41:24 +0000 UTC" firstStartedPulling="2025-11-11 13:41:25.276060168 +0000 UTC m=+695.936349787" lastFinishedPulling="2025-11-11 13:41:28.272035417 +0000 UTC m=+698.932325036" observedRunningTime="2025-11-11 13:41:29.413396752 +0000 UTC m=+700.073686401" watchObservedRunningTime="2025-11-11 13:41:29.417204769 +0000 UTC m=+700.077494398" Nov 11 13:41:29 crc kubenswrapper[4842]: I1111 13:41:29.430139 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-5ks7v" podStartSLOduration=2.579787254 podStartE2EDuration="5.430121864s" podCreationTimestamp="2025-11-11 13:41:24 +0000 UTC" firstStartedPulling="2025-11-11 13:41:25.318905237 +0000 UTC m=+695.979194856" lastFinishedPulling="2025-11-11 13:41:28.169239847 +0000 UTC m=+698.829529466" observedRunningTime="2025-11-11 13:41:29.429418762 +0000 UTC m=+700.089708391" watchObservedRunningTime="2025-11-11 13:41:29.430121864 +0000 UTC m=+700.090411483" Nov 11 13:41:29 crc kubenswrapper[4842]: I1111 13:41:29.445438 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" podStartSLOduration=2.560202205 podStartE2EDuration="5.44541448s" podCreationTimestamp="2025-11-11 13:41:24 +0000 UTC" firstStartedPulling="2025-11-11 13:41:25.341694103 +0000 UTC m=+696.001983722" lastFinishedPulling="2025-11-11 13:41:28.226906378 +0000 UTC m=+698.887195997" observedRunningTime="2025-11-11 13:41:29.441232733 +0000 UTC m=+700.101522372" watchObservedRunningTime="2025-11-11 13:41:29.44541448 +0000 UTC m=+700.105704099" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.105468 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-djwf2" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390195 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dzhjw"] Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390636 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-controller" containerID="cri-o://477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390732 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390773 4842 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-node" containerID="cri-o://70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390869 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-acl-logging" containerID="cri-o://de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390845 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="northd" containerID="cri-o://544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390942 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="sbdb" containerID="cri-o://0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.390732 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="nbdb" containerID="cri-o://f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.419199 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" containerID="cri-o://d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" gracePeriod=30 Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.675165 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/3.log" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.677989 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovn-acl-logging/0.log" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.678462 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovn-controller/0.log" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.678911 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.726634 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-llp4n"] Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.726865 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kubecfg-setup" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.726879 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kubecfg-setup" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.726894 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-acl-logging" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.726902 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-acl-logging" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.726960 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-ovn-metrics" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.726970 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-ovn-metrics" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.726984 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="northd" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.726991 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="northd" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727001 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727009 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727020 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727028 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727037 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727045 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727059 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-node" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727067 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-node" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727079 4842 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="sbdb" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727086 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="sbdb" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727113 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727121 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727131 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="nbdb" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727137 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="nbdb" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727266 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="northd" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727279 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="nbdb" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727289 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-acl-logging" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727298 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727310 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727320 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-ovn-metrics" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727329 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727335 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727343 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727351 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovn-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727361 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="sbdb" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727370 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="kube-rbac-proxy-node" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727481 4842 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727491 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: E1111 13:41:35.727502 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.727509 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerName="ovnkube-controller" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.730144 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741415 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-bin\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741472 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzc59\" (UniqueName: \"kubernetes.io/projected/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-kube-api-access-nzc59\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741503 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-systemd\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741529 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-env-overrides\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741552 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-slash\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741580 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-netd\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741603 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-systemd-units\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741627 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-netns\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741654 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-openvswitch\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741677 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-node-log\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741698 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-kubelet\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741717 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-var-lib-openvswitch\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741747 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-ovn\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741767 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-var-lib-cni-networks-ovn-kubernetes\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741790 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-ovn-kubernetes\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741810 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-etc-openvswitch\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741830 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-log-socket\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741853 4842 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-script-lib\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741875 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovn-node-metrics-cert\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741905 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-config\") pod \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\" (UID: \"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495\") " Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741941 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741974 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741992 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741994 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-run-netns\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741979 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.741994 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-slash" (OuterVolumeSpecName: "host-slash") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742005 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742013 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742014 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742023 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-node-log" (OuterVolumeSpecName: "node-log") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742044 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742030 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742069 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742090 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-log-socket" (OuterVolumeSpecName: "log-socket") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). 
InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742282 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-systemd-units\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742353 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-ovn\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742359 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742448 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742431 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742479 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742525 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-slash\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742490 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742547 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-kubelet\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742566 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovn-node-metrics-cert\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742585 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-log-socket\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742603 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x98f\" (UniqueName: \"kubernetes.io/projected/7eebfa6f-d1c3-4169-a2c1-680069e512c1-kube-api-access-9x98f\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742622 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-var-lib-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742638 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-etc-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742653 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovnkube-config\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742667 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-env-overrides\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742682 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-systemd\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742718 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-run-ovn-kubernetes\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742735 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-cni-bin\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742781 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742811 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-node-log\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742859 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovnkube-script-lib\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742887 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-cni-netd\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742948 4842 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742962 4842 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742975 4842 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742986 4842 reconciler_common.go:293] "Volume detached for 
volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-node-log\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.742998 4842 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743009 4842 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743022 4842 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743036 4842 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743050 4842 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743063 4842 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743074 4842 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-log-socket\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743086 4842 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743142 4842 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743156 4842 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743170 4842 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743182 4842 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-slash\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.743193 4842 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" 
(UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.748675 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-kube-api-access-nzc59" (OuterVolumeSpecName: "kube-api-access-nzc59") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "kube-api-access-nzc59". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.749332 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.760789 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" (UID: "d8bdbe88-f5ed-4117-92ea-6e1f45f6b495"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844723 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-var-lib-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844773 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-etc-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844802 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-env-overrides\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844799 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-var-lib-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844824 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovnkube-config\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844844 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-systemd\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844867 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-run-ovn-kubernetes\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844887 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-cni-bin\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844910 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844928 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-node-log\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844963 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovnkube-script-lib\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.844984 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-cni-netd\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845021 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-run-netns\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845039 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-systemd-units\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845060 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-ovn\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845079 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845120 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-slash\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845146 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-kubelet\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845169 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovn-node-metrics-cert\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845194 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-log-socket\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845218 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x98f\" (UniqueName: \"kubernetes.io/projected/7eebfa6f-d1c3-4169-a2c1-680069e512c1-kube-api-access-9x98f\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845233 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-etc-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845264 4842 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845279 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzc59\" (UniqueName: \"kubernetes.io/projected/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-kube-api-access-nzc59\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 
13:41:35.845291 4842 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845288 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-run-netns\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845313 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-node-log\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845338 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-systemd-units\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845368 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-ovn\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845379 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-openvswitch\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845421 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-slash\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845398 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845453 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-kubelet\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845469 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-run-systemd\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") 
" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845495 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-run-ovn-kubernetes\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845519 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-cni-bin\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845543 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-host-cni-netd\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845565 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7eebfa6f-d1c3-4169-a2c1-680069e512c1-log-socket\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845728 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovnkube-config\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.845845 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovnkube-script-lib\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.846324 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7eebfa6f-d1c3-4169-a2c1-680069e512c1-env-overrides\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.848427 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7eebfa6f-d1c3-4169-a2c1-680069e512c1-ovn-node-metrics-cert\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:35 crc kubenswrapper[4842]: I1111 13:41:35.861071 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x98f\" (UniqueName: \"kubernetes.io/projected/7eebfa6f-d1c3-4169-a2c1-680069e512c1-kube-api-access-9x98f\") pod \"ovnkube-node-llp4n\" (UID: \"7eebfa6f-d1c3-4169-a2c1-680069e512c1\") " pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.042425 4842 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.442252 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovnkube-controller/3.log" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.444396 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovn-acl-logging/0.log" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.444836 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dzhjw_d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/ovn-controller/0.log" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445241 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" exitCode=0 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445297 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" exitCode=0 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445309 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" exitCode=0 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445317 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" exitCode=0 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445324 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" exitCode=0 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445332 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" exitCode=0 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445341 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" exitCode=143 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445355 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" containerID="477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" exitCode=143 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445355 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445327 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445477 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445504 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445517 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445528 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445542 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445546 4842 scope.go:117] "RemoveContainer" containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445557 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445571 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445578 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445584 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445592 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445599 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445605 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445610 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445615 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445623 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445631 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445637 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445643 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445648 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445654 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445659 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445665 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445670 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445676 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445682 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445689 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445696 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445702 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445707 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445713 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445718 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445724 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445729 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445734 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445739 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445744 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445751 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dzhjw" event={"ID":"d8bdbe88-f5ed-4117-92ea-6e1f45f6b495","Type":"ContainerDied","Data":"b253f6b4c2870b007716b6cacd111c41ea996b1dac1c6c0dd61b757cf11fe8d3"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445761 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445767 4842 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445771 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445776 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445781 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445786 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445792 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445799 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445806 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.445812 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.447178 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/2.log" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.448475 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/1.log" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.448525 4842 generic.go:334] "Generic (PLEG): container finished" podID="a899ee4d-e1d3-44cc-a780-2dac60da21eb" containerID="3ca9f25d2b904da9dc8ce64fb2917f65c3fef7e8395347fbf0d1793e5f15643b" exitCode=2 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.448653 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerDied","Data":"3ca9f25d2b904da9dc8ce64fb2917f65c3fef7e8395347fbf0d1793e5f15643b"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.448702 4842 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.449458 4842 scope.go:117] "RemoveContainer" 
containerID="3ca9f25d2b904da9dc8ce64fb2917f65c3fef7e8395347fbf0d1793e5f15643b" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.449822 4842 generic.go:334] "Generic (PLEG): container finished" podID="7eebfa6f-d1c3-4169-a2c1-680069e512c1" containerID="7ec67179553d4c88b193f795d941c77a99c9b49f26a5ddb657f1a8be2d6235ee" exitCode=0 Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.449844 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerDied","Data":"7ec67179553d4c88b193f795d941c77a99c9b49f26a5ddb657f1a8be2d6235ee"} Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.449860 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"8dd52df337164ef94fb158ed82bbb16d744d68666540f15db26b549f2ce55be5"} Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.449872 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-mggn5_openshift-multus(a899ee4d-e1d3-44cc-a780-2dac60da21eb)\"" pod="openshift-multus/multus-mggn5" podUID="a899ee4d-e1d3-44cc-a780-2dac60da21eb" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.463068 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.491283 4842 scope.go:117] "RemoveContainer" containerID="0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.509574 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dzhjw"] Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.515245 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dzhjw"] Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.520573 4842 scope.go:117] "RemoveContainer" containerID="f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.549836 4842 scope.go:117] "RemoveContainer" containerID="544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.561065 4842 scope.go:117] "RemoveContainer" containerID="c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.574095 4842 scope.go:117] "RemoveContainer" containerID="70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.585919 4842 scope.go:117] "RemoveContainer" containerID="de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.597819 4842 scope.go:117] "RemoveContainer" containerID="477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.610951 4842 scope.go:117] "RemoveContainer" containerID="030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.628115 4842 scope.go:117] "RemoveContainer" containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 
13:41:36.628461 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": container with ID starting with d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2 not found: ID does not exist" containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.628503 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} err="failed to get container status \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": rpc error: code = NotFound desc = could not find container \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": container with ID starting with d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.628527 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.628723 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": container with ID starting with a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412 not found: ID does not exist" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.628746 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} err="failed to get container status \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": rpc error: code = NotFound desc = could not find container \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": container with ID starting with a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.628759 4842 scope.go:117] "RemoveContainer" containerID="0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.628947 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": container with ID starting with 0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc not found: ID does not exist" containerID="0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.628975 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} err="failed to get container status \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": rpc error: code = NotFound desc = could not find container \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": container with ID starting with 0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.628993 4842 
scope.go:117] "RemoveContainer" containerID="f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.629233 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": container with ID starting with f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca not found: ID does not exist" containerID="f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.629255 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} err="failed to get container status \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": rpc error: code = NotFound desc = could not find container \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": container with ID starting with f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.629269 4842 scope.go:117] "RemoveContainer" containerID="544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.633642 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": container with ID starting with 544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5 not found: ID does not exist" containerID="544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.633774 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} err="failed to get container status \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": rpc error: code = NotFound desc = could not find container \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": container with ID starting with 544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.633802 4842 scope.go:117] "RemoveContainer" containerID="c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.634116 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": container with ID starting with c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4 not found: ID does not exist" containerID="c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.634137 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} err="failed to get container status \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": rpc error: code = NotFound desc = could not find container \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": container with ID starting with 
c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.634156 4842 scope.go:117] "RemoveContainer" containerID="70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.634438 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": container with ID starting with 70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9 not found: ID does not exist" containerID="70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.634459 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} err="failed to get container status \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": rpc error: code = NotFound desc = could not find container \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": container with ID starting with 70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.634473 4842 scope.go:117] "RemoveContainer" containerID="de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.634686 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": container with ID starting with de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a not found: ID does not exist" containerID="de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.634718 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} err="failed to get container status \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": rpc error: code = NotFound desc = could not find container \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": container with ID starting with de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.634736 4842 scope.go:117] "RemoveContainer" containerID="477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.635086 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": container with ID starting with 477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668 not found: ID does not exist" containerID="477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.635118 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} err="failed to get container status \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": rpc 
error: code = NotFound desc = could not find container \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": container with ID starting with 477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.635131 4842 scope.go:117] "RemoveContainer" containerID="030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c" Nov 11 13:41:36 crc kubenswrapper[4842]: E1111 13:41:36.640262 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": container with ID starting with 030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c not found: ID does not exist" containerID="030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.640313 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} err="failed to get container status \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": rpc error: code = NotFound desc = could not find container \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": container with ID starting with 030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.640341 4842 scope.go:117] "RemoveContainer" containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.640653 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} err="failed to get container status \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": rpc error: code = NotFound desc = could not find container \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": container with ID starting with d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.640685 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.640931 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} err="failed to get container status \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": rpc error: code = NotFound desc = could not find container \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": container with ID starting with a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.640955 4842 scope.go:117] "RemoveContainer" containerID="0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.641200 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} err="failed to get container status \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": rpc 
error: code = NotFound desc = could not find container \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": container with ID starting with 0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.641220 4842 scope.go:117] "RemoveContainer" containerID="f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.641489 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} err="failed to get container status \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": rpc error: code = NotFound desc = could not find container \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": container with ID starting with f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.641510 4842 scope.go:117] "RemoveContainer" containerID="544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.641842 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} err="failed to get container status \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": rpc error: code = NotFound desc = could not find container \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": container with ID starting with 544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.641866 4842 scope.go:117] "RemoveContainer" containerID="c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.642255 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} err="failed to get container status \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": rpc error: code = NotFound desc = could not find container \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": container with ID starting with c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.642274 4842 scope.go:117] "RemoveContainer" containerID="70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.642532 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} err="failed to get container status \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": rpc error: code = NotFound desc = could not find container \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": container with ID starting with 70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.642561 4842 scope.go:117] "RemoveContainer" containerID="de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" Nov 11 13:41:36 crc 
kubenswrapper[4842]: I1111 13:41:36.642930 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} err="failed to get container status \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": rpc error: code = NotFound desc = could not find container \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": container with ID starting with de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.642949 4842 scope.go:117] "RemoveContainer" containerID="477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.643209 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} err="failed to get container status \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": rpc error: code = NotFound desc = could not find container \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": container with ID starting with 477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.643236 4842 scope.go:117] "RemoveContainer" containerID="030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.643454 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} err="failed to get container status \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": rpc error: code = NotFound desc = could not find container \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": container with ID starting with 030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.643473 4842 scope.go:117] "RemoveContainer" containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.643707 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} err="failed to get container status \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": rpc error: code = NotFound desc = could not find container \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": container with ID starting with d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.643729 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.643988 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} err="failed to get container status \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": rpc error: code = NotFound desc = could not find container \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": container with ID 
starting with a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.644007 4842 scope.go:117] "RemoveContainer" containerID="0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.644254 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} err="failed to get container status \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": rpc error: code = NotFound desc = could not find container \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": container with ID starting with 0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.644275 4842 scope.go:117] "RemoveContainer" containerID="f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.644522 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} err="failed to get container status \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": rpc error: code = NotFound desc = could not find container \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": container with ID starting with f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.644543 4842 scope.go:117] "RemoveContainer" containerID="544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.644772 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} err="failed to get container status \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": rpc error: code = NotFound desc = could not find container \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": container with ID starting with 544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.644791 4842 scope.go:117] "RemoveContainer" containerID="c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645018 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} err="failed to get container status \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": rpc error: code = NotFound desc = could not find container \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": container with ID starting with c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645049 4842 scope.go:117] "RemoveContainer" containerID="70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645313 4842 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} err="failed to get container status \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": rpc error: code = NotFound desc = could not find container \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": container with ID starting with 70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645332 4842 scope.go:117] "RemoveContainer" containerID="de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645536 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} err="failed to get container status \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": rpc error: code = NotFound desc = could not find container \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": container with ID starting with de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645557 4842 scope.go:117] "RemoveContainer" containerID="477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645890 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} err="failed to get container status \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": rpc error: code = NotFound desc = could not find container \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": container with ID starting with 477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.645934 4842 scope.go:117] "RemoveContainer" containerID="030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.646249 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} err="failed to get container status \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": rpc error: code = NotFound desc = could not find container \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": container with ID starting with 030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.646272 4842 scope.go:117] "RemoveContainer" containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.646574 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} err="failed to get container status \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": rpc error: code = NotFound desc = could not find container \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": container with ID starting with d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2 not found: ID does not exist" Nov 
11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.646597 4842 scope.go:117] "RemoveContainer" containerID="a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.646803 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412"} err="failed to get container status \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": rpc error: code = NotFound desc = could not find container \"a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412\": container with ID starting with a292d74257fc93a78a6fedf0921220ca53a5290b05e2770fcf7c9fb3b5ff2412 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.646820 4842 scope.go:117] "RemoveContainer" containerID="0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647035 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc"} err="failed to get container status \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": rpc error: code = NotFound desc = could not find container \"0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc\": container with ID starting with 0978527ef1f7ed6888376f2284fe6eacff915ca554930ab48a9e70586db48afc not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647056 4842 scope.go:117] "RemoveContainer" containerID="f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647291 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca"} err="failed to get container status \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": rpc error: code = NotFound desc = could not find container \"f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca\": container with ID starting with f8195e0084fe597b1c0cd4ab1a73d53a37b17f3a9f4e72187054e0a1e34d77ca not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647313 4842 scope.go:117] "RemoveContainer" containerID="544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647508 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5"} err="failed to get container status \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": rpc error: code = NotFound desc = could not find container \"544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5\": container with ID starting with 544be48593f0c892d58b1707e3def9e3348ccd6d133500c55ed340867b65cdf5 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647530 4842 scope.go:117] "RemoveContainer" containerID="c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647703 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4"} err="failed to get container status 
\"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": rpc error: code = NotFound desc = could not find container \"c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4\": container with ID starting with c0ac7f01e7ce1ee6b8de725a8980f22c327ed59d755b8225ca8b604a28531fb4 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647732 4842 scope.go:117] "RemoveContainer" containerID="70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647971 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9"} err="failed to get container status \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": rpc error: code = NotFound desc = could not find container \"70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9\": container with ID starting with 70154aadb701d5345919b2bda1df9448a9fb8a63b17daf8f437415d34e0f42c9 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.647991 4842 scope.go:117] "RemoveContainer" containerID="de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.648247 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a"} err="failed to get container status \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": rpc error: code = NotFound desc = could not find container \"de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a\": container with ID starting with de64ff07194734822be8fa7e30d8057b7d84c0f94c76de5bdd468ffff85fc36a not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.648274 4842 scope.go:117] "RemoveContainer" containerID="477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.648528 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668"} err="failed to get container status \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": rpc error: code = NotFound desc = could not find container \"477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668\": container with ID starting with 477b70d711d002ead9418a0cc0a2fc9ee74f72b35c7da7de2c5605e324502668 not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.648548 4842 scope.go:117] "RemoveContainer" containerID="030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.648739 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c"} err="failed to get container status \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": rpc error: code = NotFound desc = could not find container \"030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c\": container with ID starting with 030e166779102ca22e342b7fbfa554624776f378be43d1dfa404d6235b81a95c not found: ID does not exist" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.648758 4842 scope.go:117] "RemoveContainer" 
containerID="d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2" Nov 11 13:41:36 crc kubenswrapper[4842]: I1111 13:41:36.648989 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2"} err="failed to get container status \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": rpc error: code = NotFound desc = could not find container \"d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2\": container with ID starting with d1646520fbc1d04e50b1ba2ed3ab650f3ec4e8564fd9f0c479415571a08732a2 not found: ID does not exist" Nov 11 13:41:37 crc kubenswrapper[4842]: I1111 13:41:37.460488 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"d6c2a4092ed517ee46ca9505368a05657660579ebd970b59f5791a5ab69f9a8e"} Nov 11 13:41:37 crc kubenswrapper[4842]: I1111 13:41:37.460748 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"7e1af7ba66491e552e04e3728f01262506a49c654d49d84f4fcf973fb9b43658"} Nov 11 13:41:37 crc kubenswrapper[4842]: I1111 13:41:37.460763 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"b0149bab7f367835631355a3469e9c5ba513d14226995c968ab05889ef50eb14"} Nov 11 13:41:37 crc kubenswrapper[4842]: I1111 13:41:37.460778 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"7a0addd89a79826bdae1964fcaadab47416db584ee923f7fd30abdd416ad05f7"} Nov 11 13:41:37 crc kubenswrapper[4842]: I1111 13:41:37.460789 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"38f22879340dee86fd9dd819abce7602cfe0197816f09b63eaae822883a92e5c"} Nov 11 13:41:37 crc kubenswrapper[4842]: I1111 13:41:37.460799 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"459c93f7671e4b163c95114bac156540f5e02cd4314de4ce6d7c9187c7ce0b99"} Nov 11 13:41:38 crc kubenswrapper[4842]: I1111 13:41:38.064676 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8bdbe88-f5ed-4117-92ea-6e1f45f6b495" path="/var/lib/kubelet/pods/d8bdbe88-f5ed-4117-92ea-6e1f45f6b495/volumes" Nov 11 13:41:39 crc kubenswrapper[4842]: I1111 13:41:39.473886 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"77d5043c327b2af9e66974aa3c30ea026eb88312587a92ec6b1237a30db6f67a"} Nov 11 13:41:42 crc kubenswrapper[4842]: I1111 13:41:42.491052 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" event={"ID":"7eebfa6f-d1c3-4169-a2c1-680069e512c1","Type":"ContainerStarted","Data":"6a15724dc03e767ea0f7074bba4b0a00805f2959500c39535895b6b63ffea996"} Nov 11 13:41:42 crc kubenswrapper[4842]: I1111 13:41:42.491645 4842 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:42 crc kubenswrapper[4842]: I1111 13:41:42.491665 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:42 crc kubenswrapper[4842]: I1111 13:41:42.491676 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:42 crc kubenswrapper[4842]: I1111 13:41:42.521262 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:42 crc kubenswrapper[4842]: I1111 13:41:42.523968 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:41:42 crc kubenswrapper[4842]: I1111 13:41:42.525947 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" podStartSLOduration=7.525930657 podStartE2EDuration="7.525930657s" podCreationTimestamp="2025-11-11 13:41:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:41:42.524348268 +0000 UTC m=+713.184637907" watchObservedRunningTime="2025-11-11 13:41:42.525930657 +0000 UTC m=+713.186220276" Nov 11 13:41:47 crc kubenswrapper[4842]: I1111 13:41:47.059514 4842 scope.go:117] "RemoveContainer" containerID="3ca9f25d2b904da9dc8ce64fb2917f65c3fef7e8395347fbf0d1793e5f15643b" Nov 11 13:41:47 crc kubenswrapper[4842]: E1111 13:41:47.060088 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-mggn5_openshift-multus(a899ee4d-e1d3-44cc-a780-2dac60da21eb)\"" pod="openshift-multus/multus-mggn5" podUID="a899ee4d-e1d3-44cc-a780-2dac60da21eb" Nov 11 13:41:50 crc kubenswrapper[4842]: I1111 13:41:50.238623 4842 scope.go:117] "RemoveContainer" containerID="969c153c2c582aeede6d044185212d20e71fc19a574cbb3bcdc41018ffe0a067" Nov 11 13:41:50 crc kubenswrapper[4842]: I1111 13:41:50.539586 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/2.log" Nov 11 13:41:58 crc kubenswrapper[4842]: I1111 13:41:58.059333 4842 scope.go:117] "RemoveContainer" containerID="3ca9f25d2b904da9dc8ce64fb2917f65c3fef7e8395347fbf0d1793e5f15643b" Nov 11 13:41:58 crc kubenswrapper[4842]: I1111 13:41:58.584564 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mggn5_a899ee4d-e1d3-44cc-a780-2dac60da21eb/kube-multus/2.log" Nov 11 13:41:58 crc kubenswrapper[4842]: I1111 13:41:58.584899 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mggn5" event={"ID":"a899ee4d-e1d3-44cc-a780-2dac60da21eb","Type":"ContainerStarted","Data":"6e4f4ea69ae90b49870892f83b44898007e61cc11f754960f053a10659a3e7fc"} Nov 11 13:42:03 crc kubenswrapper[4842]: I1111 13:42:03.803594 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs"] Nov 11 13:42:03 crc kubenswrapper[4842]: I1111 13:42:03.805000 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:03 crc kubenswrapper[4842]: I1111 13:42:03.806560 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 11 13:42:03 crc kubenswrapper[4842]: I1111 13:42:03.818432 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs"] Nov 11 13:42:03 crc kubenswrapper[4842]: I1111 13:42:03.990947 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7ss2\" (UniqueName: \"kubernetes.io/projected/f6c2e540-e84c-47ee-a474-f337988ea0e5-kube-api-access-w7ss2\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:03 crc kubenswrapper[4842]: I1111 13:42:03.991010 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:03 crc kubenswrapper[4842]: I1111 13:42:03.991033 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.092084 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7ss2\" (UniqueName: \"kubernetes.io/projected/f6c2e540-e84c-47ee-a474-f337988ea0e5-kube-api-access-w7ss2\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.092166 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.092195 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.092626 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-util\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.092750 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-bundle\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.111000 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7ss2\" (UniqueName: \"kubernetes.io/projected/f6c2e540-e84c-47ee-a474-f337988ea0e5-kube-api-access-w7ss2\") pod \"a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.127785 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.566689 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs"] Nov 11 13:42:04 crc kubenswrapper[4842]: I1111 13:42:04.615814 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" event={"ID":"f6c2e540-e84c-47ee-a474-f337988ea0e5","Type":"ContainerStarted","Data":"9ae80bb67ee9d8dc4fe3e5b60ef3fd97ea05a967cd2ed2711818d3ffff51b9f2"} Nov 11 13:42:05 crc kubenswrapper[4842]: I1111 13:42:05.622431 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" event={"ID":"f6c2e540-e84c-47ee-a474-f337988ea0e5","Type":"ContainerStarted","Data":"e4536dc21585100d43fc891985e93abf63345f57e8dddcb2f1a4bda2dfb93214"} Nov 11 13:42:06 crc kubenswrapper[4842]: I1111 13:42:06.068930 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-llp4n" Nov 11 13:42:06 crc kubenswrapper[4842]: I1111 13:42:06.629198 4842 generic.go:334] "Generic (PLEG): container finished" podID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerID="e4536dc21585100d43fc891985e93abf63345f57e8dddcb2f1a4bda2dfb93214" exitCode=0 Nov 11 13:42:06 crc kubenswrapper[4842]: I1111 13:42:06.629256 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" event={"ID":"f6c2e540-e84c-47ee-a474-f337988ea0e5","Type":"ContainerDied","Data":"e4536dc21585100d43fc891985e93abf63345f57e8dddcb2f1a4bda2dfb93214"} Nov 11 13:42:08 crc kubenswrapper[4842]: I1111 13:42:08.641224 4842 generic.go:334] "Generic (PLEG): container finished" podID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerID="4b2f89b510d786ea2a69bcb95f8178578798142b97355f3efcbdee146ab8b745" exitCode=0 Nov 11 13:42:08 crc kubenswrapper[4842]: I1111 13:42:08.641318 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" event={"ID":"f6c2e540-e84c-47ee-a474-f337988ea0e5","Type":"ContainerDied","Data":"4b2f89b510d786ea2a69bcb95f8178578798142b97355f3efcbdee146ab8b745"} Nov 11 13:42:09 crc kubenswrapper[4842]: I1111 13:42:09.650223 4842 generic.go:334] "Generic (PLEG): container finished" podID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerID="890ac142175b05c2bfd3007ba3a54f8855be5fadfd7beaf1a01dbfbb1bc0fdea" exitCode=0 Nov 11 13:42:09 crc kubenswrapper[4842]: I1111 13:42:09.650273 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" event={"ID":"f6c2e540-e84c-47ee-a474-f337988ea0e5","Type":"ContainerDied","Data":"890ac142175b05c2bfd3007ba3a54f8855be5fadfd7beaf1a01dbfbb1bc0fdea"} Nov 11 13:42:10 crc kubenswrapper[4842]: I1111 13:42:10.896137 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:10 crc kubenswrapper[4842]: I1111 13:42:10.976720 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-util\") pod \"f6c2e540-e84c-47ee-a474-f337988ea0e5\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " Nov 11 13:42:10 crc kubenswrapper[4842]: I1111 13:42:10.976777 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-bundle\") pod \"f6c2e540-e84c-47ee-a474-f337988ea0e5\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " Nov 11 13:42:10 crc kubenswrapper[4842]: I1111 13:42:10.976844 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7ss2\" (UniqueName: \"kubernetes.io/projected/f6c2e540-e84c-47ee-a474-f337988ea0e5-kube-api-access-w7ss2\") pod \"f6c2e540-e84c-47ee-a474-f337988ea0e5\" (UID: \"f6c2e540-e84c-47ee-a474-f337988ea0e5\") " Nov 11 13:42:10 crc kubenswrapper[4842]: I1111 13:42:10.978913 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-bundle" (OuterVolumeSpecName: "bundle") pod "f6c2e540-e84c-47ee-a474-f337988ea0e5" (UID: "f6c2e540-e84c-47ee-a474-f337988ea0e5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:42:10 crc kubenswrapper[4842]: I1111 13:42:10.984060 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6c2e540-e84c-47ee-a474-f337988ea0e5-kube-api-access-w7ss2" (OuterVolumeSpecName: "kube-api-access-w7ss2") pod "f6c2e540-e84c-47ee-a474-f337988ea0e5" (UID: "f6c2e540-e84c-47ee-a474-f337988ea0e5"). InnerVolumeSpecName "kube-api-access-w7ss2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:42:11 crc kubenswrapper[4842]: I1111 13:42:11.078033 4842 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:11 crc kubenswrapper[4842]: I1111 13:42:11.078067 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7ss2\" (UniqueName: \"kubernetes.io/projected/f6c2e540-e84c-47ee-a474-f337988ea0e5-kube-api-access-w7ss2\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:11 crc kubenswrapper[4842]: I1111 13:42:11.113147 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-util" (OuterVolumeSpecName: "util") pod "f6c2e540-e84c-47ee-a474-f337988ea0e5" (UID: "f6c2e540-e84c-47ee-a474-f337988ea0e5"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:42:11 crc kubenswrapper[4842]: I1111 13:42:11.178898 4842 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6c2e540-e84c-47ee-a474-f337988ea0e5-util\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:11 crc kubenswrapper[4842]: I1111 13:42:11.665646 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" event={"ID":"f6c2e540-e84c-47ee-a474-f337988ea0e5","Type":"ContainerDied","Data":"9ae80bb67ee9d8dc4fe3e5b60ef3fd97ea05a967cd2ed2711818d3ffff51b9f2"} Nov 11 13:42:11 crc kubenswrapper[4842]: I1111 13:42:11.665707 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ae80bb67ee9d8dc4fe3e5b60ef3fd97ea05a967cd2ed2711818d3ffff51b9f2" Nov 11 13:42:11 crc kubenswrapper[4842]: I1111 13:42:11.665829 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs" Nov 11 13:42:17 crc kubenswrapper[4842]: I1111 13:42:17.800837 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w2kq7"] Nov 11 13:42:17 crc kubenswrapper[4842]: I1111 13:42:17.801433 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerName="controller-manager" containerID="cri-o://a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9" gracePeriod=30 Nov 11 13:42:17 crc kubenswrapper[4842]: I1111 13:42:17.890622 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h"] Nov 11 13:42:17 crc kubenswrapper[4842]: I1111 13:42:17.891190 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" podUID="479b59cc-2cef-4728-a3c8-df498efbeb99" containerName="route-controller-manager" containerID="cri-o://d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca" gracePeriod=30 Nov 11 13:42:17 crc kubenswrapper[4842]: E1111 13:42:17.987928 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf34c36d1_6785_42b0_8b27_4ba2e00c4db1.slice/crio-a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9.scope\": RecentStats: unable to find data in memory cache]" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.411424 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.458936 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.572760 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-serving-cert\") pod \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.572841 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-client-ca\") pod \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.572872 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-client-ca\") pod \"479b59cc-2cef-4728-a3c8-df498efbeb99\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.572936 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-proxy-ca-bundles\") pod \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.572968 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-config\") pod \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.573008 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw9fp\" (UniqueName: \"kubernetes.io/projected/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-kube-api-access-cw9fp\") pod \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\" (UID: \"f34c36d1-6785-42b0-8b27-4ba2e00c4db1\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.573040 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq5tv\" (UniqueName: \"kubernetes.io/projected/479b59cc-2cef-4728-a3c8-df498efbeb99-kube-api-access-fq5tv\") pod \"479b59cc-2cef-4728-a3c8-df498efbeb99\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.573065 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-config\") pod \"479b59cc-2cef-4728-a3c8-df498efbeb99\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.573120 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/479b59cc-2cef-4728-a3c8-df498efbeb99-serving-cert\") pod \"479b59cc-2cef-4728-a3c8-df498efbeb99\" (UID: \"479b59cc-2cef-4728-a3c8-df498efbeb99\") " Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.573770 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod 
"f34c36d1-6785-42b0-8b27-4ba2e00c4db1" (UID: "f34c36d1-6785-42b0-8b27-4ba2e00c4db1"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.573779 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-client-ca" (OuterVolumeSpecName: "client-ca") pod "479b59cc-2cef-4728-a3c8-df498efbeb99" (UID: "479b59cc-2cef-4728-a3c8-df498efbeb99"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.573874 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-config" (OuterVolumeSpecName: "config") pod "f34c36d1-6785-42b0-8b27-4ba2e00c4db1" (UID: "f34c36d1-6785-42b0-8b27-4ba2e00c4db1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.574160 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-config" (OuterVolumeSpecName: "config") pod "479b59cc-2cef-4728-a3c8-df498efbeb99" (UID: "479b59cc-2cef-4728-a3c8-df498efbeb99"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.574299 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-client-ca" (OuterVolumeSpecName: "client-ca") pod "f34c36d1-6785-42b0-8b27-4ba2e00c4db1" (UID: "f34c36d1-6785-42b0-8b27-4ba2e00c4db1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.597246 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f34c36d1-6785-42b0-8b27-4ba2e00c4db1" (UID: "f34c36d1-6785-42b0-8b27-4ba2e00c4db1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.597314 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/479b59cc-2cef-4728-a3c8-df498efbeb99-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "479b59cc-2cef-4728-a3c8-df498efbeb99" (UID: "479b59cc-2cef-4728-a3c8-df498efbeb99"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.597346 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/479b59cc-2cef-4728-a3c8-df498efbeb99-kube-api-access-fq5tv" (OuterVolumeSpecName: "kube-api-access-fq5tv") pod "479b59cc-2cef-4728-a3c8-df498efbeb99" (UID: "479b59cc-2cef-4728-a3c8-df498efbeb99"). InnerVolumeSpecName "kube-api-access-fq5tv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.598174 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-kube-api-access-cw9fp" (OuterVolumeSpecName: "kube-api-access-cw9fp") pod "f34c36d1-6785-42b0-8b27-4ba2e00c4db1" (UID: "f34c36d1-6785-42b0-8b27-4ba2e00c4db1"). InnerVolumeSpecName "kube-api-access-cw9fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675160 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/479b59cc-2cef-4728-a3c8-df498efbeb99-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675198 4842 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675211 4842 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-client-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675222 4842 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-client-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675233 4842 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675273 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675287 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw9fp\" (UniqueName: \"kubernetes.io/projected/f34c36d1-6785-42b0-8b27-4ba2e00c4db1-kube-api-access-cw9fp\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675301 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq5tv\" (UniqueName: \"kubernetes.io/projected/479b59cc-2cef-4728-a3c8-df498efbeb99-kube-api-access-fq5tv\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.675312 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479b59cc-2cef-4728-a3c8-df498efbeb99-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.718756 4842 generic.go:334] "Generic (PLEG): container finished" podID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerID="a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9" exitCode=0 Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.718868 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.721262 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" event={"ID":"f34c36d1-6785-42b0-8b27-4ba2e00c4db1","Type":"ContainerDied","Data":"a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9"} Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.721337 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-w2kq7" event={"ID":"f34c36d1-6785-42b0-8b27-4ba2e00c4db1","Type":"ContainerDied","Data":"efda1bc6ee03189da718ca0f50d31ec070a1779aee1bef967264a4a1b990a1a3"} Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.721356 4842 scope.go:117] "RemoveContainer" containerID="a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.723385 4842 generic.go:334] "Generic (PLEG): container finished" podID="479b59cc-2cef-4728-a3c8-df498efbeb99" containerID="d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca" exitCode=0 Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.723414 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" event={"ID":"479b59cc-2cef-4728-a3c8-df498efbeb99","Type":"ContainerDied","Data":"d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca"} Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.723456 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" event={"ID":"479b59cc-2cef-4728-a3c8-df498efbeb99","Type":"ContainerDied","Data":"2e8b053dba38330124ece7e2e6dd50ce4ab611b99c8f881276d30a9902a86834"} Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.723527 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.747391 4842 scope.go:117] "RemoveContainer" containerID="a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9" Nov 11 13:42:18 crc kubenswrapper[4842]: E1111 13:42:18.750563 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9\": container with ID starting with a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9 not found: ID does not exist" containerID="a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.750606 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9"} err="failed to get container status \"a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9\": rpc error: code = NotFound desc = could not find container \"a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9\": container with ID starting with a27b834eb058425ce1b69d1dc550dedca059abcb98b6b89f63d57097d14d62b9 not found: ID does not exist" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.750645 4842 scope.go:117] "RemoveContainer" containerID="d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.766262 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w2kq7"] Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.778355 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-w2kq7"] Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.780286 4842 scope.go:117] "RemoveContainer" containerID="d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca" Nov 11 13:42:18 crc kubenswrapper[4842]: E1111 13:42:18.782960 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca\": container with ID starting with d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca not found: ID does not exist" containerID="d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.783006 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca"} err="failed to get container status \"d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca\": rpc error: code = NotFound desc = could not find container \"d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca\": container with ID starting with d276154f02f6d595255c756ff23b0c51c18f874137861c42f5a7bb0a165d65ca not found: ID does not exist" Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.799161 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h"] Nov 11 13:42:18 crc kubenswrapper[4842]: I1111 13:42:18.801667 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v984h"] Nov 11 
13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186118 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp"] Nov 11 13:42:19 crc kubenswrapper[4842]: E1111 13:42:19.186407 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="479b59cc-2cef-4728-a3c8-df498efbeb99" containerName="route-controller-manager" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186431 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="479b59cc-2cef-4728-a3c8-df498efbeb99" containerName="route-controller-manager" Nov 11 13:42:19 crc kubenswrapper[4842]: E1111 13:42:19.186445 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerName="controller-manager" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186453 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerName="controller-manager" Nov 11 13:42:19 crc kubenswrapper[4842]: E1111 13:42:19.186467 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerName="pull" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186475 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerName="pull" Nov 11 13:42:19 crc kubenswrapper[4842]: E1111 13:42:19.186485 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerName="extract" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186492 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerName="extract" Nov 11 13:42:19 crc kubenswrapper[4842]: E1111 13:42:19.186502 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerName="util" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186509 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerName="util" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186619 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" containerName="controller-manager" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186638 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="479b59cc-2cef-4728-a3c8-df498efbeb99" containerName="route-controller-manager" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.186650 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" containerName="extract" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.187093 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.188804 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58fc94c486-fwlcx"] Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.189546 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.214813 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.214829 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.218627 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.218696 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.218628 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.218818 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.218632 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.219027 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.221166 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.221211 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.221293 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.225766 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp"] Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.235053 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58fc94c486-fwlcx"] Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.235354 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.259198 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.385809 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94bc3683-1ddb-45c8-a0cd-98dc1719823b-serving-cert\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386035 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-proxy-ca-bundles\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386132 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/94bc3683-1ddb-45c8-a0cd-98dc1719823b-client-ca\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386439 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94bc3683-1ddb-45c8-a0cd-98dc1719823b-config\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386529 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvks8\" (UniqueName: \"kubernetes.io/projected/94bc3683-1ddb-45c8-a0cd-98dc1719823b-kube-api-access-tvks8\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386601 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76g4k\" (UniqueName: \"kubernetes.io/projected/6298fcf0-12d8-423a-98e9-22dc9db02ac0-kube-api-access-76g4k\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386681 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6298fcf0-12d8-423a-98e9-22dc9db02ac0-serving-cert\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386753 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-config\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.386823 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-client-ca\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487444 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/94bc3683-1ddb-45c8-a0cd-98dc1719823b-serving-cert\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487491 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-proxy-ca-bundles\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487509 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/94bc3683-1ddb-45c8-a0cd-98dc1719823b-client-ca\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487539 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94bc3683-1ddb-45c8-a0cd-98dc1719823b-config\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487558 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvks8\" (UniqueName: \"kubernetes.io/projected/94bc3683-1ddb-45c8-a0cd-98dc1719823b-kube-api-access-tvks8\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487578 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76g4k\" (UniqueName: \"kubernetes.io/projected/6298fcf0-12d8-423a-98e9-22dc9db02ac0-kube-api-access-76g4k\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487601 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6298fcf0-12d8-423a-98e9-22dc9db02ac0-serving-cert\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487617 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-config\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.487631 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-client-ca\") pod \"controller-manager-58fc94c486-fwlcx\" 
(UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.488839 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/94bc3683-1ddb-45c8-a0cd-98dc1719823b-client-ca\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.488918 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-proxy-ca-bundles\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.489298 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-config\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.489367 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94bc3683-1ddb-45c8-a0cd-98dc1719823b-config\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.489410 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6298fcf0-12d8-423a-98e9-22dc9db02ac0-client-ca\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.491366 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94bc3683-1ddb-45c8-a0cd-98dc1719823b-serving-cert\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.496854 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6298fcf0-12d8-423a-98e9-22dc9db02ac0-serving-cert\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.515824 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76g4k\" (UniqueName: \"kubernetes.io/projected/6298fcf0-12d8-423a-98e9-22dc9db02ac0-kube-api-access-76g4k\") pod \"controller-manager-58fc94c486-fwlcx\" (UID: \"6298fcf0-12d8-423a-98e9-22dc9db02ac0\") " pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.516121 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-tvks8\" (UniqueName: \"kubernetes.io/projected/94bc3683-1ddb-45c8-a0cd-98dc1719823b-kube-api-access-tvks8\") pod \"route-controller-manager-7d9657798c-x5gzp\" (UID: \"94bc3683-1ddb-45c8-a0cd-98dc1719823b\") " pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.801044 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:19 crc kubenswrapper[4842]: I1111 13:42:19.807888 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.079277 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="479b59cc-2cef-4728-a3c8-df498efbeb99" path="/var/lib/kubelet/pods/479b59cc-2cef-4728-a3c8-df498efbeb99/volumes" Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.083428 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f34c36d1-6785-42b0-8b27-4ba2e00c4db1" path="/var/lib/kubelet/pods/f34c36d1-6785-42b0-8b27-4ba2e00c4db1/volumes" Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.192864 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp"] Nov 11 13:42:20 crc kubenswrapper[4842]: W1111 13:42:20.198065 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94bc3683_1ddb_45c8_a0cd_98dc1719823b.slice/crio-ae5d8d96d2433a5cec8ae20bdcff006b13d42856613f4ff474db6bae0054c10f WatchSource:0}: Error finding container ae5d8d96d2433a5cec8ae20bdcff006b13d42856613f4ff474db6bae0054c10f: Status 404 returned error can't find the container with id ae5d8d96d2433a5cec8ae20bdcff006b13d42856613f4ff474db6bae0054c10f Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.297153 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58fc94c486-fwlcx"] Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.736400 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" event={"ID":"6298fcf0-12d8-423a-98e9-22dc9db02ac0","Type":"ContainerStarted","Data":"86d3213ebf4a5e1b3647d8f27fa43da5a6ebf0372ab6a7e5b32b613055085cbb"} Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.736444 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" event={"ID":"6298fcf0-12d8-423a-98e9-22dc9db02ac0","Type":"ContainerStarted","Data":"5e1ad1e5b68c19961a272f4fb93a8edb52b0a9b8f8a2f9e1988d08219908a34d"} Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.738463 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.740519 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" event={"ID":"94bc3683-1ddb-45c8-a0cd-98dc1719823b","Type":"ContainerStarted","Data":"3268f89a143e196e12f2fb4beeecd2f5fe7549f7e6c085d75314a2d647f25fae"} Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.740572 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" event={"ID":"94bc3683-1ddb-45c8-a0cd-98dc1719823b","Type":"ContainerStarted","Data":"ae5d8d96d2433a5cec8ae20bdcff006b13d42856613f4ff474db6bae0054c10f"} Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.740773 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.744690 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.754051 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58fc94c486-fwlcx" podStartSLOduration=2.754031348 podStartE2EDuration="2.754031348s" podCreationTimestamp="2025-11-11 13:42:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:42:20.751158897 +0000 UTC m=+751.411448546" watchObservedRunningTime="2025-11-11 13:42:20.754031348 +0000 UTC m=+751.414320967" Nov 11 13:42:20 crc kubenswrapper[4842]: I1111 13:42:20.800837 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" podStartSLOduration=2.800815111 podStartE2EDuration="2.800815111s" podCreationTimestamp="2025-11-11 13:42:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:42:20.774804646 +0000 UTC m=+751.435094265" watchObservedRunningTime="2025-11-11 13:42:20.800815111 +0000 UTC m=+751.461104730" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.384137 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d9657798c-x5gzp" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.705678 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2"] Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.706393 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.708233 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.708301 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-b4xdb" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.708437 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.720797 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2"] Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.736638 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxt9v\" (UniqueName: \"kubernetes.io/projected/36861a8f-d7ae-47db-b504-1eb8a1694af7-kube-api-access-rxt9v\") pod \"obo-prometheus-operator-7c8cf85677-5lnv2\" (UID: \"36861a8f-d7ae-47db-b504-1eb8a1694af7\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.838060 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxt9v\" (UniqueName: \"kubernetes.io/projected/36861a8f-d7ae-47db-b504-1eb8a1694af7-kube-api-access-rxt9v\") pod \"obo-prometheus-operator-7c8cf85677-5lnv2\" (UID: \"36861a8f-d7ae-47db-b504-1eb8a1694af7\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.838993 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg"] Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.839900 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.843142 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.843396 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-gv4b9" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.858226 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg"] Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.870162 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8"] Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.871269 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.881026 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxt9v\" (UniqueName: \"kubernetes.io/projected/36861a8f-d7ae-47db-b504-1eb8a1694af7-kube-api-access-rxt9v\") pod \"obo-prometheus-operator-7c8cf85677-5lnv2\" (UID: \"36861a8f-d7ae-47db-b504-1eb8a1694af7\") " pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.881132 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8"] Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.941442 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dc69b653-7e30-40ed-995a-bd2ca759365c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-6sfgg\" (UID: \"dc69b653-7e30-40ed-995a-bd2ca759365c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.941535 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/688f6c76-6b40-4937-9040-6fc178c7740d-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-gxfm8\" (UID: \"688f6c76-6b40-4937-9040-6fc178c7740d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.941594 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dc69b653-7e30-40ed-995a-bd2ca759365c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-6sfgg\" (UID: \"dc69b653-7e30-40ed-995a-bd2ca759365c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:21 crc kubenswrapper[4842]: I1111 13:42:21.941623 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/688f6c76-6b40-4937-9040-6fc178c7740d-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-gxfm8\" (UID: \"688f6c76-6b40-4937-9040-6fc178c7740d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.025167 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.033846 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-8ckgj"] Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.034689 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.042311 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dc69b653-7e30-40ed-995a-bd2ca759365c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-6sfgg\" (UID: \"dc69b653-7e30-40ed-995a-bd2ca759365c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.042368 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.042385 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/688f6c76-6b40-4937-9040-6fc178c7740d-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-gxfm8\" (UID: \"688f6c76-6b40-4937-9040-6fc178c7740d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.042413 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dc69b653-7e30-40ed-995a-bd2ca759365c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-6sfgg\" (UID: \"dc69b653-7e30-40ed-995a-bd2ca759365c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.042436 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/688f6c76-6b40-4937-9040-6fc178c7740d-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-gxfm8\" (UID: \"688f6c76-6b40-4937-9040-6fc178c7740d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.043169 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-rcwtb" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.047985 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/688f6c76-6b40-4937-9040-6fc178c7740d-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-gxfm8\" (UID: \"688f6c76-6b40-4937-9040-6fc178c7740d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.049592 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/688f6c76-6b40-4937-9040-6fc178c7740d-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-gxfm8\" (UID: \"688f6c76-6b40-4937-9040-6fc178c7740d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.049645 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dc69b653-7e30-40ed-995a-bd2ca759365c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-6sfgg\" (UID: \"dc69b653-7e30-40ed-995a-bd2ca759365c\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.049739 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dc69b653-7e30-40ed-995a-bd2ca759365c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6b97475496-6sfgg\" (UID: \"dc69b653-7e30-40ed-995a-bd2ca759365c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.053832 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-8ckgj"] Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.143557 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ad3cf656-3491-4507-bd22-df41ef4576d8-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-8ckgj\" (UID: \"ad3cf656-3491-4507-bd22-df41ef4576d8\") " pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.143711 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n7rh\" (UniqueName: \"kubernetes.io/projected/ad3cf656-3491-4507-bd22-df41ef4576d8-kube-api-access-6n7rh\") pod \"observability-operator-cc5f78dfc-8ckgj\" (UID: \"ad3cf656-3491-4507-bd22-df41ef4576d8\") " pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.157184 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.207758 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.246785 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ad3cf656-3491-4507-bd22-df41ef4576d8-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-8ckgj\" (UID: \"ad3cf656-3491-4507-bd22-df41ef4576d8\") " pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.246871 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n7rh\" (UniqueName: \"kubernetes.io/projected/ad3cf656-3491-4507-bd22-df41ef4576d8-kube-api-access-6n7rh\") pod \"observability-operator-cc5f78dfc-8ckgj\" (UID: \"ad3cf656-3491-4507-bd22-df41ef4576d8\") " pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.251400 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ad3cf656-3491-4507-bd22-df41ef4576d8-observability-operator-tls\") pod \"observability-operator-cc5f78dfc-8ckgj\" (UID: \"ad3cf656-3491-4507-bd22-df41ef4576d8\") " pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.257832 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-hw9wl"] Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.258486 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.261208 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-rrgph" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.270382 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n7rh\" (UniqueName: \"kubernetes.io/projected/ad3cf656-3491-4507-bd22-df41ef4576d8-kube-api-access-6n7rh\") pod \"observability-operator-cc5f78dfc-8ckgj\" (UID: \"ad3cf656-3491-4507-bd22-df41ef4576d8\") " pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.273397 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-hw9wl"] Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.347933 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/582cbdc6-be31-4fde-904d-820ea6228929-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-hw9wl\" (UID: \"582cbdc6-be31-4fde-904d-820ea6228929\") " pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.347998 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lm27h\" (UniqueName: \"kubernetes.io/projected/582cbdc6-be31-4fde-904d-820ea6228929-kube-api-access-lm27h\") pod \"perses-operator-54bc95c9fb-hw9wl\" (UID: \"582cbdc6-be31-4fde-904d-820ea6228929\") " pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.398606 4842 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.439955 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg"] Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.449018 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/582cbdc6-be31-4fde-904d-820ea6228929-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-hw9wl\" (UID: \"582cbdc6-be31-4fde-904d-820ea6228929\") " pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.449075 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lm27h\" (UniqueName: \"kubernetes.io/projected/582cbdc6-be31-4fde-904d-820ea6228929-kube-api-access-lm27h\") pod \"perses-operator-54bc95c9fb-hw9wl\" (UID: \"582cbdc6-be31-4fde-904d-820ea6228929\") " pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.450197 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/582cbdc6-be31-4fde-904d-820ea6228929-openshift-service-ca\") pod \"perses-operator-54bc95c9fb-hw9wl\" (UID: \"582cbdc6-be31-4fde-904d-820ea6228929\") " pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.477900 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lm27h\" (UniqueName: \"kubernetes.io/projected/582cbdc6-be31-4fde-904d-820ea6228929-kube-api-access-lm27h\") pod \"perses-operator-54bc95c9fb-hw9wl\" (UID: \"582cbdc6-be31-4fde-904d-820ea6228929\") " pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.566206 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2"] Nov 11 13:42:22 crc kubenswrapper[4842]: W1111 13:42:22.576256 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36861a8f_d7ae_47db_b504_1eb8a1694af7.slice/crio-465ac99bae027078804c5a3dc5a69b80eb6f5817960e059f47b793da003050f7 WatchSource:0}: Error finding container 465ac99bae027078804c5a3dc5a69b80eb6f5817960e059f47b793da003050f7: Status 404 returned error can't find the container with id 465ac99bae027078804c5a3dc5a69b80eb6f5817960e059f47b793da003050f7 Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.639669 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.751973 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" event={"ID":"36861a8f-d7ae-47db-b504-1eb8a1694af7","Type":"ContainerStarted","Data":"465ac99bae027078804c5a3dc5a69b80eb6f5817960e059f47b793da003050f7"} Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.752749 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8"] Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.753793 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" event={"ID":"dc69b653-7e30-40ed-995a-bd2ca759365c","Type":"ContainerStarted","Data":"b9d510078fa16bfde7f15bac0193c6b16cf84d9f2f40f2704992e32c99afcd81"} Nov 11 13:42:22 crc kubenswrapper[4842]: W1111 13:42:22.770688 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod688f6c76_6b40_4937_9040_6fc178c7740d.slice/crio-c827ca1fe33ab7011383ad3ad42c928becd8fb539c59f00ba8e8e1d42fd678ef WatchSource:0}: Error finding container c827ca1fe33ab7011383ad3ad42c928becd8fb539c59f00ba8e8e1d42fd678ef: Status 404 returned error can't find the container with id c827ca1fe33ab7011383ad3ad42c928becd8fb539c59f00ba8e8e1d42fd678ef Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.915168 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-cc5f78dfc-8ckgj"] Nov 11 13:42:22 crc kubenswrapper[4842]: W1111 13:42:22.926913 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad3cf656_3491_4507_bd22_df41ef4576d8.slice/crio-5451814af7ca81367a68a479d270e43adeb55912c757b12f71f9ff50ec241399 WatchSource:0}: Error finding container 5451814af7ca81367a68a479d270e43adeb55912c757b12f71f9ff50ec241399: Status 404 returned error can't find the container with id 5451814af7ca81367a68a479d270e43adeb55912c757b12f71f9ff50ec241399 Nov 11 13:42:22 crc kubenswrapper[4842]: I1111 13:42:22.955079 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-54bc95c9fb-hw9wl"] Nov 11 13:42:23 crc kubenswrapper[4842]: I1111 13:42:23.758709 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" event={"ID":"ad3cf656-3491-4507-bd22-df41ef4576d8","Type":"ContainerStarted","Data":"5451814af7ca81367a68a479d270e43adeb55912c757b12f71f9ff50ec241399"} Nov 11 13:42:23 crc kubenswrapper[4842]: I1111 13:42:23.760297 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" event={"ID":"688f6c76-6b40-4937-9040-6fc178c7740d","Type":"ContainerStarted","Data":"c827ca1fe33ab7011383ad3ad42c928becd8fb539c59f00ba8e8e1d42fd678ef"} Nov 11 13:42:23 crc kubenswrapper[4842]: I1111 13:42:23.761687 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" event={"ID":"582cbdc6-be31-4fde-904d-820ea6228929","Type":"ContainerStarted","Data":"49a64ce4117a0e05ea2e4956c06402c7c36c643f535fff356ae695c1f2844eb7"} Nov 11 13:42:25 crc kubenswrapper[4842]: I1111 13:42:25.686021 4842 dynamic_cafile_content.go:123] "Loaded a new CA 
Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 11 13:42:40 crc kubenswrapper[4842]: E1111 13:42:40.377808 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:27ffe36aad6e606e6d0a211f48f3cdb58a53aa0d5e8ead6a444427231261ab9e" Nov 11 13:42:40 crc kubenswrapper[4842]: E1111 13:42:40.378521 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:27ffe36aad6e606e6d0a211f48f3cdb58a53aa0d5e8ead6a444427231261ab9e,Command:[],Args:[--namespace=$(NAMESPACE) --images=alertmanager=$(RELATED_IMAGE_ALERTMANAGER) --images=prometheus=$(RELATED_IMAGE_PROMETHEUS) --images=thanos=$(RELATED_IMAGE_THANOS) --images=perses=$(RELATED_IMAGE_PERSES) --images=ui-dashboards=$(RELATED_IMAGE_CONSOLE_DASHBOARDS_PLUGIN) --images=ui-distributed-tracing=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN) --images=ui-distributed-tracing-pf5=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF5) --images=ui-distributed-tracing-pf4=$(RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF4) --images=ui-logging=$(RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN) --images=ui-logging-pf4=$(RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN_PF4) --images=ui-troubleshooting-panel=$(RELATED_IMAGE_CONSOLE_TROUBLESHOOTING_PANEL_PLUGIN) --images=ui-monitoring=$(RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN) --images=ui-monitoring-pf5=$(RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN_PF5) --images=korrel8r=$(RELATED_IMAGE_KORREL8R) --images=health-analyzer=$(RELATED_IMAGE_CLUSTER_HEALTH_ANALYZER) 
--openshift.enabled=true],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:RELATED_IMAGE_ALERTMANAGER,Value:registry.redhat.io/cluster-observability-operator/alertmanager-rhel9@sha256:4d25b0e31549d780928d2dd3eed7defd9c6d460deb92dcff0fe72c5023029404,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PROMETHEUS,Value:registry.redhat.io/cluster-observability-operator/prometheus-rhel9@sha256:a0a1d0e39de54c5b2786c2b82d0104f358b479135c069075ddd4f7cd76826c00,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_THANOS,Value:registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PERSES,Value:registry.redhat.io/cluster-observability-operator/perses-0-50-rhel9@sha256:4b5e53d226733237fc5abd0476eb3c96162cf3d8da7aeba8deda631fa8987223,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DASHBOARDS_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-0-4-rhel9@sha256:53125bddbefca2ba2b57c3fd74bd4b376da803e420201220548878f557bd6610,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-1-0-rhel9@sha256:1dbe9a684271e00c8f36d8b96c9b22f6ee3c6f907ea6ad20980901bd533f9a3a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF5,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-0-4-rhel9@sha256:6aafab2c90bcbc6702f2d63d585a764baa8de8207e6af7afa60f3976ddfa9bd3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_DISTRIBUTED_TRACING_PLUGIN_PF4,Value:registry.redhat.io/cluster-observability-operator/distributed-tracing-console-plugin-0-3-rhel9@sha256:9f80851e8137c2c5e5c2aee13fc663f6c7124d9524d88c06c1507748ce84e1ed,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/logging-console-plugin-6-1-rhel9@sha256:2c9b2be12f15f06a24393dbab6a31682cee399d42e2cc04b0dcf03b2b598d5cf,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_LOGGING_PLUGIN_PF4,Value:registry.redhat.io/cluster-observability-operator/logging-console-plugin-6-0-rhel9@sha256:e9042d93f624790c450724158a8323277e4dd136530c763fec8db31f51fd8552,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_TROUBLESHOOTING_PANEL_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/troubleshooting-panel-console-plugin-0-4-rhel9@sha256:456d45001816b9adc38745e0ad8705bdc0150d03d0f65e0dfa9caf3fb8980fad,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN,Value:registry.redhat.io/cluster-observability-operator/monitoring-console-plugin-0-5-rhel9@sha256:f3446969c67c18b44bee38ac946091fe9397a2117cb5b7aacb39406461c1efe1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CONSOLE_MONITORING_PLUGIN_PF5,Value:registry.redhat.io/cluster-observability-operator/monitoring-console-plugin-0-4-rhel9@sha256:ade84f8be7d23bd4b9c80e07462dc947280f0bcf6071e6edd927fef54c254b7e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KORREL8R,Value:registry.redhat.io/cluster-observability-operator/korrel8r-rhel9@sha256:039e139cf9217bbe72248674df76cbe4baf4bef9f8dc367d2cb51eae9c4aa9d7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLUSTER_HEALTH_ANALYZER,Value:registry.redhat.io/cluster-observability-operator/cluster-health-analyzer-rhel9@sha256:
142180f277f0221ef2d4176f9af6dcdb4e7ab434a68f0dfad2ee5bee0e667ddd,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.2.2,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{400 -3} {} 400m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:observability-operator-tls,ReadOnly:true,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6n7rh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod observability-operator-cc5f78dfc-8ckgj_openshift-operators(ad3cf656-3491-4507-bd22-df41ef4576d8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:42:40 crc kubenswrapper[4842]: E1111 13:42:40.379751 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" podUID="ad3cf656-3491-4507-bd22-df41ef4576d8" Nov 11 13:42:40 crc kubenswrapper[4842]: E1111 13:42:40.454871 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:e54c1e1301be66933f3ecb01d5a0ca27f58aabfd905b18b7d057bbf23bdb7b0d" Nov 11 13:42:40 crc kubenswrapper[4842]: E1111 13:42:40.455021 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:e54c1e1301be66933f3ecb01d5a0ca27f58aabfd905b18b7d057bbf23bdb7b0d,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt 
--web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.2.2,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-6b97475496-6sfgg_openshift-operators(dc69b653-7e30-40ed-995a-bd2ca759365c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:42:40 crc kubenswrapper[4842]: E1111 13:42:40.456173 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" podUID="dc69b653-7e30-40ed-995a-bd2ca759365c" Nov 11 13:42:40 crc kubenswrapper[4842]: I1111 13:42:40.863395 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" event={"ID":"36861a8f-d7ae-47db-b504-1eb8a1694af7","Type":"ContainerStarted","Data":"0ca0141e472c1220497d280002d5424571f39a053e8d386e07b26110814b1f71"} Nov 11 13:42:40 crc kubenswrapper[4842]: I1111 13:42:40.865355 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" event={"ID":"688f6c76-6b40-4937-9040-6fc178c7740d","Type":"ContainerStarted","Data":"1e1bb9fcfd2ac5cef8a45d96a097c787e000c7add9b58db95411d3e9fea79951"} Nov 11 13:42:40 crc kubenswrapper[4842]: I1111 13:42:40.868066 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" event={"ID":"582cbdc6-be31-4fde-904d-820ea6228929","Type":"ContainerStarted","Data":"ffb2bddd27639b25a91cc7690c82785fca625d374a68dd18c8679e852d9123be"} Nov 11 13:42:40 crc kubenswrapper[4842]: E1111 13:42:40.869566 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/cluster-observability-operator/cluster-observability-rhel9-operator@sha256:27ffe36aad6e606e6d0a211f48f3cdb58a53aa0d5e8ead6a444427231261ab9e\\\"\"" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" podUID="ad3cf656-3491-4507-bd22-df41ef4576d8" Nov 11 13:42:40 crc kubenswrapper[4842]: I1111 13:42:40.885791 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-7c8cf85677-5lnv2" podStartSLOduration=2.028786603 podStartE2EDuration="19.885774775s" podCreationTimestamp="2025-11-11 13:42:21 +0000 UTC" firstStartedPulling="2025-11-11 13:42:22.580505429 +0000 UTC m=+753.240795038" lastFinishedPulling="2025-11-11 13:42:40.437493591 +0000 UTC m=+771.097783210" observedRunningTime="2025-11-11 13:42:40.885374362 +0000 UTC m=+771.545663981" watchObservedRunningTime="2025-11-11 13:42:40.885774775 +0000 UTC m=+771.546064394" Nov 11 13:42:40 crc kubenswrapper[4842]: I1111 13:42:40.967912 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-gxfm8" podStartSLOduration=2.314394583 podStartE2EDuration="19.967886356s" podCreationTimestamp="2025-11-11 13:42:21 +0000 UTC" firstStartedPulling="2025-11-11 13:42:22.774763995 +0000 UTC m=+753.435053604" lastFinishedPulling="2025-11-11 13:42:40.428255758 +0000 UTC m=+771.088545377" observedRunningTime="2025-11-11 13:42:40.927979122 +0000 UTC m=+771.588268751" watchObservedRunningTime="2025-11-11 13:42:40.967886356 +0000 UTC m=+771.628175985" Nov 11 13:42:40 crc kubenswrapper[4842]: I1111 13:42:40.969744 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" podStartSLOduration=1.492551528 podStartE2EDuration="18.969737025s" podCreationTimestamp="2025-11-11 13:42:22 +0000 UTC" firstStartedPulling="2025-11-11 13:42:22.960532111 +0000 UTC m=+753.620821730" lastFinishedPulling="2025-11-11 13:42:40.437717598 +0000 UTC m=+771.098007227" observedRunningTime="2025-11-11 13:42:40.966581505 +0000 UTC m=+771.626871124" watchObservedRunningTime="2025-11-11 13:42:40.969737025 +0000 UTC m=+771.630026644" Nov 11 13:42:41 crc kubenswrapper[4842]: I1111 13:42:41.874086 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" event={"ID":"dc69b653-7e30-40ed-995a-bd2ca759365c","Type":"ContainerStarted","Data":"899e809e70722d74013fe915ab9012b542530e0e99a4d5d1f3ef110c735f6be3"} Nov 11 13:42:41 crc kubenswrapper[4842]: I1111 13:42:41.874394 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:41 crc kubenswrapper[4842]: I1111 13:42:41.892121 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6b97475496-6sfgg" podStartSLOduration=-9223372015.962694 podStartE2EDuration="20.892082219s" podCreationTimestamp="2025-11-11 13:42:21 +0000 UTC" firstStartedPulling="2025-11-11 13:42:22.456034986 +0000 UTC m=+753.116324605" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:42:41.887450512 +0000 UTC m=+772.547740151" watchObservedRunningTime="2025-11-11 13:42:41.892082219 +0000 UTC m=+772.552371838" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.136849 4842 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-vj7lq"] Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.138205 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.152148 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vj7lq"] Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.227433 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-catalog-content\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.227831 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-utilities\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.227872 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwnzm\" (UniqueName: \"kubernetes.io/projected/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-kube-api-access-xwnzm\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.329500 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwnzm\" (UniqueName: \"kubernetes.io/projected/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-kube-api-access-xwnzm\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.329577 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-catalog-content\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.329634 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-utilities\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.330075 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-utilities\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.330623 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-catalog-content\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") 
" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.355085 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwnzm\" (UniqueName: \"kubernetes.io/projected/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-kube-api-access-xwnzm\") pod \"redhat-marketplace-vj7lq\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.453961 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:43 crc kubenswrapper[4842]: I1111 13:42:43.908644 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vj7lq"] Nov 11 13:42:43 crc kubenswrapper[4842]: W1111 13:42:43.914793 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2d9d7e5_ebc6_4a04_939f_db7f18b5822b.slice/crio-26daec74b5a1b4c2bbab63ff645b0850f85a8fb3b49e4923839deadff2391949 WatchSource:0}: Error finding container 26daec74b5a1b4c2bbab63ff645b0850f85a8fb3b49e4923839deadff2391949: Status 404 returned error can't find the container with id 26daec74b5a1b4c2bbab63ff645b0850f85a8fb3b49e4923839deadff2391949 Nov 11 13:42:44 crc kubenswrapper[4842]: I1111 13:42:44.889461 4842 generic.go:334] "Generic (PLEG): container finished" podID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerID="1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d" exitCode=0 Nov 11 13:42:44 crc kubenswrapper[4842]: I1111 13:42:44.889500 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vj7lq" event={"ID":"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b","Type":"ContainerDied","Data":"1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d"} Nov 11 13:42:44 crc kubenswrapper[4842]: I1111 13:42:44.889543 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vj7lq" event={"ID":"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b","Type":"ContainerStarted","Data":"26daec74b5a1b4c2bbab63ff645b0850f85a8fb3b49e4923839deadff2391949"} Nov 11 13:42:44 crc kubenswrapper[4842]: I1111 13:42:44.961323 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:42:44 crc kubenswrapper[4842]: I1111 13:42:44.961413 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:42:46 crc kubenswrapper[4842]: I1111 13:42:46.900937 4842 generic.go:334] "Generic (PLEG): container finished" podID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerID="7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516" exitCode=0 Nov 11 13:42:46 crc kubenswrapper[4842]: I1111 13:42:46.900978 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vj7lq" 
event={"ID":"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b","Type":"ContainerDied","Data":"7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516"} Nov 11 13:42:47 crc kubenswrapper[4842]: I1111 13:42:47.908610 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vj7lq" event={"ID":"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b","Type":"ContainerStarted","Data":"1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c"} Nov 11 13:42:47 crc kubenswrapper[4842]: I1111 13:42:47.923617 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vj7lq" podStartSLOduration=2.410826419 podStartE2EDuration="4.923601485s" podCreationTimestamp="2025-11-11 13:42:43 +0000 UTC" firstStartedPulling="2025-11-11 13:42:44.89073757 +0000 UTC m=+775.551027189" lastFinishedPulling="2025-11-11 13:42:47.403512636 +0000 UTC m=+778.063802255" observedRunningTime="2025-11-11 13:42:47.922480049 +0000 UTC m=+778.582769668" watchObservedRunningTime="2025-11-11 13:42:47.923601485 +0000 UTC m=+778.583891104" Nov 11 13:42:52 crc kubenswrapper[4842]: I1111 13:42:52.642781 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-54bc95c9fb-hw9wl" Nov 11 13:42:52 crc kubenswrapper[4842]: I1111 13:42:52.937005 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" event={"ID":"ad3cf656-3491-4507-bd22-df41ef4576d8","Type":"ContainerStarted","Data":"6cbadde54cf28cb3ab78f1e58da262c6a1b7f104d0d6d49a97a2b61af13396ad"} Nov 11 13:42:52 crc kubenswrapper[4842]: I1111 13:42:52.938266 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:52 crc kubenswrapper[4842]: I1111 13:42:52.939538 4842 patch_prober.go:28] interesting pod/observability-operator-cc5f78dfc-8ckgj container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.45:8081/healthz\": dial tcp 10.217.0.45:8081: connect: connection refused" start-of-body= Nov 11 13:42:52 crc kubenswrapper[4842]: I1111 13:42:52.939579 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" podUID="ad3cf656-3491-4507-bd22-df41ef4576d8" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.45:8081/healthz\": dial tcp 10.217.0.45:8081: connect: connection refused" Nov 11 13:42:53 crc kubenswrapper[4842]: I1111 13:42:53.454835 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:53 crc kubenswrapper[4842]: I1111 13:42:53.454929 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:53 crc kubenswrapper[4842]: I1111 13:42:53.494399 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:53 crc kubenswrapper[4842]: I1111 13:42:53.511593 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" podStartSLOduration=1.683031219 podStartE2EDuration="31.511576504s" podCreationTimestamp="2025-11-11 13:42:22 +0000 UTC" firstStartedPulling="2025-11-11 13:42:22.929631822 +0000 UTC m=+753.589921441" 
lastFinishedPulling="2025-11-11 13:42:52.758177107 +0000 UTC m=+783.418466726" observedRunningTime="2025-11-11 13:42:52.95859208 +0000 UTC m=+783.618881699" watchObservedRunningTime="2025-11-11 13:42:53.511576504 +0000 UTC m=+784.171866113" Nov 11 13:42:53 crc kubenswrapper[4842]: I1111 13:42:53.960626 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-cc5f78dfc-8ckgj" Nov 11 13:42:54 crc kubenswrapper[4842]: I1111 13:42:54.034727 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:54 crc kubenswrapper[4842]: I1111 13:42:54.082856 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vj7lq"] Nov 11 13:42:55 crc kubenswrapper[4842]: I1111 13:42:55.950697 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vj7lq" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="registry-server" containerID="cri-o://1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c" gracePeriod=2 Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.391078 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.571451 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwnzm\" (UniqueName: \"kubernetes.io/projected/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-kube-api-access-xwnzm\") pod \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.571575 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-catalog-content\") pod \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.571619 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-utilities\") pod \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\" (UID: \"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b\") " Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.572414 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-utilities" (OuterVolumeSpecName: "utilities") pod "c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" (UID: "c2d9d7e5-ebc6-4a04-939f-db7f18b5822b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.576188 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-kube-api-access-xwnzm" (OuterVolumeSpecName: "kube-api-access-xwnzm") pod "c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" (UID: "c2d9d7e5-ebc6-4a04-939f-db7f18b5822b"). InnerVolumeSpecName "kube-api-access-xwnzm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.589503 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" (UID: "c2d9d7e5-ebc6-4a04-939f-db7f18b5822b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.673498 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwnzm\" (UniqueName: \"kubernetes.io/projected/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-kube-api-access-xwnzm\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.673533 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.673546 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.958854 4842 generic.go:334] "Generic (PLEG): container finished" podID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerID="1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c" exitCode=0 Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.958908 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vj7lq" event={"ID":"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b","Type":"ContainerDied","Data":"1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c"} Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.958932 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vj7lq" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.958949 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vj7lq" event={"ID":"c2d9d7e5-ebc6-4a04-939f-db7f18b5822b","Type":"ContainerDied","Data":"26daec74b5a1b4c2bbab63ff645b0850f85a8fb3b49e4923839deadff2391949"} Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.958971 4842 scope.go:117] "RemoveContainer" containerID="1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.975320 4842 scope.go:117] "RemoveContainer" containerID="7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516" Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.994358 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vj7lq"] Nov 11 13:42:56 crc kubenswrapper[4842]: I1111 13:42:56.994506 4842 scope.go:117] "RemoveContainer" containerID="1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d" Nov 11 13:42:57 crc kubenswrapper[4842]: I1111 13:42:57.001934 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vj7lq"] Nov 11 13:42:57 crc kubenswrapper[4842]: I1111 13:42:57.020292 4842 scope.go:117] "RemoveContainer" containerID="1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c" Nov 11 13:42:57 crc kubenswrapper[4842]: E1111 13:42:57.020828 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c\": container with ID starting with 1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c not found: ID does not exist" containerID="1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c" Nov 11 13:42:57 crc kubenswrapper[4842]: I1111 13:42:57.020880 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c"} err="failed to get container status \"1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c\": rpc error: code = NotFound desc = could not find container \"1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c\": container with ID starting with 1fd391e39624f9d32902161ef2875c49686807f4227b415ec664b8c3b62b195c not found: ID does not exist" Nov 11 13:42:57 crc kubenswrapper[4842]: I1111 13:42:57.020917 4842 scope.go:117] "RemoveContainer" containerID="7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516" Nov 11 13:42:57 crc kubenswrapper[4842]: E1111 13:42:57.021351 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516\": container with ID starting with 7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516 not found: ID does not exist" containerID="7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516" Nov 11 13:42:57 crc kubenswrapper[4842]: I1111 13:42:57.021392 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516"} err="failed to get container status \"7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516\": rpc error: code = NotFound desc = could not find 
container \"7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516\": container with ID starting with 7e2afeab84bdf96d1a535a6544800dbc5a8fec90924dcd1d19811f7f48ad2516 not found: ID does not exist" Nov 11 13:42:57 crc kubenswrapper[4842]: I1111 13:42:57.021420 4842 scope.go:117] "RemoveContainer" containerID="1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d" Nov 11 13:42:57 crc kubenswrapper[4842]: E1111 13:42:57.021759 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d\": container with ID starting with 1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d not found: ID does not exist" containerID="1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d" Nov 11 13:42:57 crc kubenswrapper[4842]: I1111 13:42:57.021797 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d"} err="failed to get container status \"1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d\": rpc error: code = NotFound desc = could not find container \"1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d\": container with ID starting with 1bb637c36b5c4fca5577738fee6b1f9bee20230ee96262a79ca1e7145394598d not found: ID does not exist" Nov 11 13:42:58 crc kubenswrapper[4842]: I1111 13:42:58.066204 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" path="/var/lib/kubelet/pods/c2d9d7e5-ebc6-4a04-939f-db7f18b5822b/volumes" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.837058 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wl2bw"] Nov 11 13:43:05 crc kubenswrapper[4842]: E1111 13:43:05.838023 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="extract-utilities" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.838046 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="extract-utilities" Nov 11 13:43:05 crc kubenswrapper[4842]: E1111 13:43:05.838066 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="extract-content" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.838078 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="extract-content" Nov 11 13:43:05 crc kubenswrapper[4842]: E1111 13:43:05.838143 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="registry-server" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.838156 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="registry-server" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.838320 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d9d7e5-ebc6-4a04-939f-db7f18b5822b" containerName="registry-server" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.839401 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.861927 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wl2bw"] Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.982738 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-utilities\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.982823 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlptn\" (UniqueName: \"kubernetes.io/projected/7e157548-8425-433b-a5ed-394a662310d8-kube-api-access-rlptn\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:05 crc kubenswrapper[4842]: I1111 13:43:05.982904 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-catalog-content\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.083988 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-utilities\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.084406 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlptn\" (UniqueName: \"kubernetes.io/projected/7e157548-8425-433b-a5ed-394a662310d8-kube-api-access-rlptn\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.084445 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-catalog-content\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.085177 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-catalog-content\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.085546 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-utilities\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.104534 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rlptn\" (UniqueName: \"kubernetes.io/projected/7e157548-8425-433b-a5ed-394a662310d8-kube-api-access-rlptn\") pod \"certified-operators-wl2bw\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.166351 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:06 crc kubenswrapper[4842]: I1111 13:43:06.608706 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wl2bw"] Nov 11 13:43:07 crc kubenswrapper[4842]: I1111 13:43:07.018827 4842 generic.go:334] "Generic (PLEG): container finished" podID="7e157548-8425-433b-a5ed-394a662310d8" containerID="4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551" exitCode=0 Nov 11 13:43:07 crc kubenswrapper[4842]: I1111 13:43:07.018904 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2bw" event={"ID":"7e157548-8425-433b-a5ed-394a662310d8","Type":"ContainerDied","Data":"4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551"} Nov 11 13:43:07 crc kubenswrapper[4842]: I1111 13:43:07.018943 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2bw" event={"ID":"7e157548-8425-433b-a5ed-394a662310d8","Type":"ContainerStarted","Data":"35451c5c2ae7075fe2a63e1b0c8b91c0272e239e5d1eebe95a4ae2045bcc0cdb"} Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.038684 4842 generic.go:334] "Generic (PLEG): container finished" podID="7e157548-8425-433b-a5ed-394a662310d8" containerID="c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971" exitCode=0 Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.038790 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2bw" event={"ID":"7e157548-8425-433b-a5ed-394a662310d8","Type":"ContainerDied","Data":"c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971"} Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.835319 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.838015 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/a6d815214afcb93f379916e45350d3de39072121f31a1d7eaaf6e22c2dtsngs"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.841874 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nk726"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.842088 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nk726" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="registry-server" containerID="cri-o://73398d067055fe2e71719fa59514d9ebabe150c55ee2d5f99440103705a05797" gracePeriod=30 Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.848792 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wl2bw"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.860948 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5frqb"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.861333 4842 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5frqb" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="registry-server" containerID="cri-o://403583adc84de5b663c09b6fc723fcedeb9a9abe7a22ef9dd976b15b437c3a63" gracePeriod=30 Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.863871 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-95g48"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.864079 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" podUID="ce99883b-0503-48f7-9711-c7caaa523a00" containerName="marketplace-operator" containerID="cri-o://fd1626eb34b9e9a09034f46dad8190949550347983a3c71a56e689006b552d58" gracePeriod=30 Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.876678 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4gcmx"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.877023 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4gcmx" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="registry-server" containerID="cri-o://9fe3582ac4652fc88bf16a93154f9f2fe15cb2d184f9fb3c79008775907ac2cd" gracePeriod=30 Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.883508 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94sst"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.884371 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.897885 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cthhp"] Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.898154 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cthhp" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="registry-server" containerID="cri-o://db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf" gracePeriod=30 Nov 11 13:43:09 crc kubenswrapper[4842]: I1111 13:43:09.914009 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94sst"] Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.031517 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlhtp\" (UniqueName: \"kubernetes.io/projected/46efe664-2d21-4657-b466-579abe4f7f02-kube-api-access-qlhtp\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.031560 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/46efe664-2d21-4657-b466-579abe4f7f02-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.031583 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46efe664-2d21-4657-b466-579abe4f7f02-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.059162 4842 generic.go:334] "Generic (PLEG): container finished" podID="fa5e095a-f74a-470d-9070-769a25b3299d" containerID="403583adc84de5b663c09b6fc723fcedeb9a9abe7a22ef9dd976b15b437c3a63" exitCode=0 Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.065440 4842 generic.go:334] "Generic (PLEG): container finished" podID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerID="73398d067055fe2e71719fa59514d9ebabe150c55ee2d5f99440103705a05797" exitCode=0 Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.067793 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6c2e540-e84c-47ee-a474-f337988ea0e5" path="/var/lib/kubelet/pods/f6c2e540-e84c-47ee-a474-f337988ea0e5/volumes" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.068399 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5frqb" event={"ID":"fa5e095a-f74a-470d-9070-769a25b3299d","Type":"ContainerDied","Data":"403583adc84de5b663c09b6fc723fcedeb9a9abe7a22ef9dd976b15b437c3a63"} Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.068438 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nk726" event={"ID":"4b47536f-3d40-4117-bb3c-b7751e6bcc16","Type":"ContainerDied","Data":"73398d067055fe2e71719fa59514d9ebabe150c55ee2d5f99440103705a05797"} Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.071542 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2bw" event={"ID":"7e157548-8425-433b-a5ed-394a662310d8","Type":"ContainerStarted","Data":"06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51"} Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.073348 4842 generic.go:334] "Generic (PLEG): container finished" podID="ce99883b-0503-48f7-9711-c7caaa523a00" containerID="fd1626eb34b9e9a09034f46dad8190949550347983a3c71a56e689006b552d58" exitCode=0 Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.073410 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" event={"ID":"ce99883b-0503-48f7-9711-c7caaa523a00","Type":"ContainerDied","Data":"fd1626eb34b9e9a09034f46dad8190949550347983a3c71a56e689006b552d58"} Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.077573 4842 generic.go:334] "Generic (PLEG): container finished" podID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerID="9fe3582ac4652fc88bf16a93154f9f2fe15cb2d184f9fb3c79008775907ac2cd" exitCode=0 Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.077623 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4gcmx" event={"ID":"e3601123-3a84-4cc7-9b1a-2b56cffb00a6","Type":"ContainerDied","Data":"9fe3582ac4652fc88bf16a93154f9f2fe15cb2d184f9fb3c79008775907ac2cd"} Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.132778 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlhtp\" (UniqueName: \"kubernetes.io/projected/46efe664-2d21-4657-b466-579abe4f7f02-kube-api-access-qlhtp\") pod 
\"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.132819 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/46efe664-2d21-4657-b466-579abe4f7f02-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.132841 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46efe664-2d21-4657-b466-579abe4f7f02-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.134380 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46efe664-2d21-4657-b466-579abe4f7f02-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.140490 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/46efe664-2d21-4657-b466-579abe4f7f02-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.151653 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlhtp\" (UniqueName: \"kubernetes.io/projected/46efe664-2d21-4657-b466-579abe4f7f02-kube-api-access-qlhtp\") pod \"marketplace-operator-79b997595-94sst\" (UID: \"46efe664-2d21-4657-b466-579abe4f7f02\") " pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.251058 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.289983 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.309869 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wl2bw" podStartSLOduration=2.7905670049999998 podStartE2EDuration="5.309844329s" podCreationTimestamp="2025-11-11 13:43:05 +0000 UTC" firstStartedPulling="2025-11-11 13:43:07.021442359 +0000 UTC m=+797.681731978" lastFinishedPulling="2025-11-11 13:43:09.540719673 +0000 UTC m=+800.201009302" observedRunningTime="2025-11-11 13:43:10.092204338 +0000 UTC m=+800.752493957" watchObservedRunningTime="2025-11-11 13:43:10.309844329 +0000 UTC m=+800.970133948" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.431487 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.438494 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-catalog-content\") pod \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.438555 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d7vb\" (UniqueName: \"kubernetes.io/projected/4b47536f-3d40-4117-bb3c-b7751e6bcc16-kube-api-access-4d7vb\") pod \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.438586 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-utilities\") pod \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\" (UID: \"4b47536f-3d40-4117-bb3c-b7751e6bcc16\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.440196 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-utilities" (OuterVolumeSpecName: "utilities") pod "4b47536f-3d40-4117-bb3c-b7751e6bcc16" (UID: "4b47536f-3d40-4117-bb3c-b7751e6bcc16"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.445359 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b47536f-3d40-4117-bb3c-b7751e6bcc16-kube-api-access-4d7vb" (OuterVolumeSpecName: "kube-api-access-4d7vb") pod "4b47536f-3d40-4117-bb3c-b7751e6bcc16" (UID: "4b47536f-3d40-4117-bb3c-b7751e6bcc16"). InnerVolumeSpecName "kube-api-access-4d7vb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.453010 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.461562 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.493144 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.499517 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b47536f-3d40-4117-bb3c-b7751e6bcc16" (UID: "4b47536f-3d40-4117-bb3c-b7751e6bcc16"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540442 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkm66\" (UniqueName: \"kubernetes.io/projected/fa5e095a-f74a-470d-9070-769a25b3299d-kube-api-access-vkm66\") pod \"fa5e095a-f74a-470d-9070-769a25b3299d\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540485 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-utilities\") pod \"fa5e095a-f74a-470d-9070-769a25b3299d\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540550 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r5jt\" (UniqueName: \"kubernetes.io/projected/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-kube-api-access-5r5jt\") pod \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540569 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-utilities\") pod \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540614 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-catalog-content\") pod \"fa5e095a-f74a-470d-9070-769a25b3299d\" (UID: \"fa5e095a-f74a-470d-9070-769a25b3299d\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540639 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-catalog-content\") pod \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\" (UID: \"e3601123-3a84-4cc7-9b1a-2b56cffb00a6\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540880 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540899 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d7vb\" (UniqueName: \"kubernetes.io/projected/4b47536f-3d40-4117-bb3c-b7751e6bcc16-kube-api-access-4d7vb\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.540910 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b47536f-3d40-4117-bb3c-b7751e6bcc16-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.541631 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-utilities" (OuterVolumeSpecName: "utilities") pod "fa5e095a-f74a-470d-9070-769a25b3299d" (UID: "fa5e095a-f74a-470d-9070-769a25b3299d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.541693 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-utilities" (OuterVolumeSpecName: "utilities") pod "e3601123-3a84-4cc7-9b1a-2b56cffb00a6" (UID: "e3601123-3a84-4cc7-9b1a-2b56cffb00a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.543385 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-kube-api-access-5r5jt" (OuterVolumeSpecName: "kube-api-access-5r5jt") pod "e3601123-3a84-4cc7-9b1a-2b56cffb00a6" (UID: "e3601123-3a84-4cc7-9b1a-2b56cffb00a6"). InnerVolumeSpecName "kube-api-access-5r5jt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.543782 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa5e095a-f74a-470d-9070-769a25b3299d-kube-api-access-vkm66" (OuterVolumeSpecName: "kube-api-access-vkm66") pod "fa5e095a-f74a-470d-9070-769a25b3299d" (UID: "fa5e095a-f74a-470d-9070-769a25b3299d"). InnerVolumeSpecName "kube-api-access-vkm66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.560568 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3601123-3a84-4cc7-9b1a-2b56cffb00a6" (UID: "e3601123-3a84-4cc7-9b1a-2b56cffb00a6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.595677 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa5e095a-f74a-470d-9070-769a25b3299d" (UID: "fa5e095a-f74a-470d-9070-769a25b3299d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.641809 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-utilities\") pod \"fd55de82-0278-4627-870c-9b09edbca3d5\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.642116 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vpvp\" (UniqueName: \"kubernetes.io/projected/fd55de82-0278-4627-870c-9b09edbca3d5-kube-api-access-8vpvp\") pod \"fd55de82-0278-4627-870c-9b09edbca3d5\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.642249 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-catalog-content\") pod \"fd55de82-0278-4627-870c-9b09edbca3d5\" (UID: \"fd55de82-0278-4627-870c-9b09edbca3d5\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.642387 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-trusted-ca\") pod \"ce99883b-0503-48f7-9711-c7caaa523a00\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.642551 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5t6z\" (UniqueName: \"kubernetes.io/projected/ce99883b-0503-48f7-9711-c7caaa523a00-kube-api-access-q5t6z\") pod \"ce99883b-0503-48f7-9711-c7caaa523a00\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.642636 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-utilities" (OuterVolumeSpecName: "utilities") pod "fd55de82-0278-4627-870c-9b09edbca3d5" (UID: "fd55de82-0278-4627-870c-9b09edbca3d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.642665 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-operator-metrics\") pod \"ce99883b-0503-48f7-9711-c7caaa523a00\" (UID: \"ce99883b-0503-48f7-9711-c7caaa523a00\") " Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.642869 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "ce99883b-0503-48f7-9711-c7caaa523a00" (UID: "ce99883b-0503-48f7-9711-c7caaa523a00"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643170 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643239 4842 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643298 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkm66\" (UniqueName: \"kubernetes.io/projected/fa5e095a-f74a-470d-9070-769a25b3299d-kube-api-access-vkm66\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643363 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643491 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r5jt\" (UniqueName: \"kubernetes.io/projected/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-kube-api-access-5r5jt\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643571 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643652 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3601123-3a84-4cc7-9b1a-2b56cffb00a6-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.643710 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa5e095a-f74a-470d-9070-769a25b3299d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.645454 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce99883b-0503-48f7-9711-c7caaa523a00-kube-api-access-q5t6z" (OuterVolumeSpecName: "kube-api-access-q5t6z") pod "ce99883b-0503-48f7-9711-c7caaa523a00" (UID: "ce99883b-0503-48f7-9711-c7caaa523a00"). InnerVolumeSpecName "kube-api-access-q5t6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.645481 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "ce99883b-0503-48f7-9711-c7caaa523a00" (UID: "ce99883b-0503-48f7-9711-c7caaa523a00"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.645513 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd55de82-0278-4627-870c-9b09edbca3d5-kube-api-access-8vpvp" (OuterVolumeSpecName: "kube-api-access-8vpvp") pod "fd55de82-0278-4627-870c-9b09edbca3d5" (UID: "fd55de82-0278-4627-870c-9b09edbca3d5"). 
InnerVolumeSpecName "kube-api-access-8vpvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.734864 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94sst"] Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.744588 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vpvp\" (UniqueName: \"kubernetes.io/projected/fd55de82-0278-4627-870c-9b09edbca3d5-kube-api-access-8vpvp\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.744653 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5t6z\" (UniqueName: \"kubernetes.io/projected/ce99883b-0503-48f7-9711-c7caaa523a00-kube-api-access-q5t6z\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.744665 4842 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ce99883b-0503-48f7-9711-c7caaa523a00-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.746282 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd55de82-0278-4627-870c-9b09edbca3d5" (UID: "fd55de82-0278-4627-870c-9b09edbca3d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:10 crc kubenswrapper[4842]: I1111 13:43:10.846160 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd55de82-0278-4627-870c-9b09edbca3d5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.083787 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" event={"ID":"46efe664-2d21-4657-b466-579abe4f7f02","Type":"ContainerStarted","Data":"a626c47595e41af4bf8a61606d6cc2c494b8e56da4bc2f9776cf519d1617ec97"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.083845 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" event={"ID":"46efe664-2d21-4657-b466-579abe4f7f02","Type":"ContainerStarted","Data":"ad7ef0c20e72309442d01cedee9dcfcd6adb079d83c3107e0fa8d019bfd6bd0e"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.083884 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.085490 4842 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-94sst container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.49:8080/healthz\": dial tcp 10.217.0.49:8080: connect: connection refused" start-of-body= Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.085562 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" podUID="46efe664-2d21-4657-b466-579abe4f7f02" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.49:8080/healthz\": dial tcp 10.217.0.49:8080: connect: connection refused" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.088321 4842 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5frqb" event={"ID":"fa5e095a-f74a-470d-9070-769a25b3299d","Type":"ContainerDied","Data":"519a47887aed62c9ac8c33b41c69378237ddbd92e1096034d7354ab58c5f7b3b"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.088375 4842 scope.go:117] "RemoveContainer" containerID="403583adc84de5b663c09b6fc723fcedeb9a9abe7a22ef9dd976b15b437c3a63" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.088543 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5frqb" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.095737 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nk726" event={"ID":"4b47536f-3d40-4117-bb3c-b7751e6bcc16","Type":"ContainerDied","Data":"eab07e99933087250f04c1b1be72ae167d1692d9680ce1804b4b14bd9f5a89d5"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.095837 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nk726" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.104737 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.104737 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-95g48" event={"ID":"ce99883b-0503-48f7-9711-c7caaa523a00","Type":"ContainerDied","Data":"cb605b35a4f48b8b59191042c8eebb8e46c9af6c3f2b03f3a5bb47ef3877f5b1"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.112049 4842 scope.go:117] "RemoveContainer" containerID="269eeab4b52f05b6e766111e6764cc3dfbce241a4c6ede6e8477efab07efd25c" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.112261 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" podStartSLOduration=2.1122454 podStartE2EDuration="2.1122454s" podCreationTimestamp="2025-11-11 13:43:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:43:11.111718563 +0000 UTC m=+801.772008182" watchObservedRunningTime="2025-11-11 13:43:11.1122454 +0000 UTC m=+801.772535019" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.114467 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4gcmx" event={"ID":"e3601123-3a84-4cc7-9b1a-2b56cffb00a6","Type":"ContainerDied","Data":"5e70f65b5ed264a9f9dc2365c8912f3ebde3ec12c6d063ed4025f649095499c0"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.114556 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4gcmx" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.118994 4842 generic.go:334] "Generic (PLEG): container finished" podID="fd55de82-0278-4627-870c-9b09edbca3d5" containerID="db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf" exitCode=0 Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.119523 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cthhp" event={"ID":"fd55de82-0278-4627-870c-9b09edbca3d5","Type":"ContainerDied","Data":"db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.119578 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cthhp" event={"ID":"fd55de82-0278-4627-870c-9b09edbca3d5","Type":"ContainerDied","Data":"a46b7e9b711b9bbaf40760ee7a50091598453a0e384e519e031b8df12c9aa63c"} Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.120127 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wl2bw" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="registry-server" containerID="cri-o://06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51" gracePeriod=30 Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.120184 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cthhp" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.145802 4842 scope.go:117] "RemoveContainer" containerID="5ccefd7a742b43414add59b00f1abfe6ae076a59aa7b2f571d0ee5cbc631012e" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.147758 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5frqb"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.152048 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5frqb"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.161739 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nk726"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.165752 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nk726"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.186486 4842 scope.go:117] "RemoveContainer" containerID="73398d067055fe2e71719fa59514d9ebabe150c55ee2d5f99440103705a05797" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.195258 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-95g48"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.205650 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-95g48"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.206371 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cthhp"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.212003 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cthhp"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.216023 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4gcmx"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.218961 4842 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4gcmx"] Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.227275 4842 scope.go:117] "RemoveContainer" containerID="eb58a7e758f32f36e5e91036e13dafb06b66e42219be7969f2944cecda66fac6" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.242409 4842 scope.go:117] "RemoveContainer" containerID="e8307f0c7a1d9dfa1498264a9f0bc93d72a8e95914a2eec3f21d3dd25d96e480" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.255900 4842 scope.go:117] "RemoveContainer" containerID="fd1626eb34b9e9a09034f46dad8190949550347983a3c71a56e689006b552d58" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.268438 4842 scope.go:117] "RemoveContainer" containerID="9fe3582ac4652fc88bf16a93154f9f2fe15cb2d184f9fb3c79008775907ac2cd" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.280799 4842 scope.go:117] "RemoveContainer" containerID="d84734d34147f77c646766a40a755527dc9fe6f9d57fe133aff2b707396859c8" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.299455 4842 scope.go:117] "RemoveContainer" containerID="10b9ee229f39dc18eb09b078fbddcc08016c7d90cbfc87d19ba5f8ffc46bdb5b" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.313736 4842 scope.go:117] "RemoveContainer" containerID="db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.327501 4842 scope.go:117] "RemoveContainer" containerID="d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.350264 4842 scope.go:117] "RemoveContainer" containerID="6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.364254 4842 scope.go:117] "RemoveContainer" containerID="db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf" Nov 11 13:43:11 crc kubenswrapper[4842]: E1111 13:43:11.364630 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf\": container with ID starting with db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf not found: ID does not exist" containerID="db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.364668 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf"} err="failed to get container status \"db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf\": rpc error: code = NotFound desc = could not find container \"db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf\": container with ID starting with db0e9071a6b3910ecf877c12028d16d49a701b152e7c892aff1cf7be27c5eadf not found: ID does not exist" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.364693 4842 scope.go:117] "RemoveContainer" containerID="d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4" Nov 11 13:43:11 crc kubenswrapper[4842]: E1111 13:43:11.365035 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4\": container with ID starting with d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4 not found: ID does not exist" 
containerID="d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.365067 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4"} err="failed to get container status \"d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4\": rpc error: code = NotFound desc = could not find container \"d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4\": container with ID starting with d5386cd498bb020c626a7632baa2c5922ea92ec9c06d95833ff84a52e1dc79e4 not found: ID does not exist" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.365088 4842 scope.go:117] "RemoveContainer" containerID="6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370" Nov 11 13:43:11 crc kubenswrapper[4842]: E1111 13:43:11.365381 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370\": container with ID starting with 6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370 not found: ID does not exist" containerID="6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.365404 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370"} err="failed to get container status \"6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370\": rpc error: code = NotFound desc = could not find container \"6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370\": container with ID starting with 6528a33ba3ce013b734ee95f50c948e498d6f8ea04781502c6ace23489aa0370 not found: ID does not exist" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.431559 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wl2bw_7e157548-8425-433b-a5ed-394a662310d8/registry-server/0.log" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.432183 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.552169 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-catalog-content\") pod \"7e157548-8425-433b-a5ed-394a662310d8\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.552435 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-utilities\") pod \"7e157548-8425-433b-a5ed-394a662310d8\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.552536 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlptn\" (UniqueName: \"kubernetes.io/projected/7e157548-8425-433b-a5ed-394a662310d8-kube-api-access-rlptn\") pod \"7e157548-8425-433b-a5ed-394a662310d8\" (UID: \"7e157548-8425-433b-a5ed-394a662310d8\") " Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.554537 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-utilities" (OuterVolumeSpecName: "utilities") pod "7e157548-8425-433b-a5ed-394a662310d8" (UID: "7e157548-8425-433b-a5ed-394a662310d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.557367 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e157548-8425-433b-a5ed-394a662310d8-kube-api-access-rlptn" (OuterVolumeSpecName: "kube-api-access-rlptn") pod "7e157548-8425-433b-a5ed-394a662310d8" (UID: "7e157548-8425-433b-a5ed-394a662310d8"). InnerVolumeSpecName "kube-api-access-rlptn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.597745 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e157548-8425-433b-a5ed-394a662310d8" (UID: "7e157548-8425-433b-a5ed-394a662310d8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.653973 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlptn\" (UniqueName: \"kubernetes.io/projected/7e157548-8425-433b-a5ed-394a662310d8-kube-api-access-rlptn\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.654013 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:11 crc kubenswrapper[4842]: I1111 13:43:11.654027 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e157548-8425-433b-a5ed-394a662310d8-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.066556 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" path="/var/lib/kubelet/pods/4b47536f-3d40-4117-bb3c-b7751e6bcc16/volumes" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.067377 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce99883b-0503-48f7-9711-c7caaa523a00" path="/var/lib/kubelet/pods/ce99883b-0503-48f7-9711-c7caaa523a00/volumes" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.067945 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" path="/var/lib/kubelet/pods/e3601123-3a84-4cc7-9b1a-2b56cffb00a6/volumes" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.069178 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" path="/var/lib/kubelet/pods/fa5e095a-f74a-470d-9070-769a25b3299d/volumes" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.069789 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" path="/var/lib/kubelet/pods/fd55de82-0278-4627-870c-9b09edbca3d5/volumes" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.133731 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wl2bw_7e157548-8425-433b-a5ed-394a662310d8/registry-server/0.log" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.134432 4842 generic.go:334] "Generic (PLEG): container finished" podID="7e157548-8425-433b-a5ed-394a662310d8" containerID="06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51" exitCode=1 Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.134465 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2bw" event={"ID":"7e157548-8425-433b-a5ed-394a662310d8","Type":"ContainerDied","Data":"06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51"} Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.134587 4842 scope.go:117] "RemoveContainer" containerID="06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.134676 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wl2bw" event={"ID":"7e157548-8425-433b-a5ed-394a662310d8","Type":"ContainerDied","Data":"35451c5c2ae7075fe2a63e1b0c8b91c0272e239e5d1eebe95a4ae2045bcc0cdb"} Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.134770 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wl2bw" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.143743 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-94sst" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.158245 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wl2bw"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.165254 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wl2bw"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.167510 4842 scope.go:117] "RemoveContainer" containerID="c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.192421 4842 scope.go:117] "RemoveContainer" containerID="4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.214067 4842 scope.go:117] "RemoveContainer" containerID="06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.215892 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51\": container with ID starting with 06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51 not found: ID does not exist" containerID="06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.216322 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51"} err="failed to get container status \"06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51\": rpc error: code = NotFound desc = could not find container \"06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51\": container with ID starting with 06c1de44a8b44b7df09befab8d9bff1df47344f10c33d4381c62c027f80f1c51 not found: ID does not exist" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.216500 4842 scope.go:117] "RemoveContainer" containerID="c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.218487 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971\": container with ID starting with c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971 not found: ID does not exist" containerID="c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.218532 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971"} err="failed to get container status \"c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971\": rpc error: code = NotFound desc = could not find container \"c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971\": container with ID starting with c4466a6f8d92c472d4635da1fffbdebad76e7849cb110ab1ccdcbd701ab24971 not found: ID does not exist" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.218558 4842 scope.go:117] 
"RemoveContainer" containerID="4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.219050 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551\": container with ID starting with 4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551 not found: ID does not exist" containerID="4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.219094 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551"} err="failed to get container status \"4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551\": rpc error: code = NotFound desc = could not find container \"4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551\": container with ID starting with 4d95fd090376369d85140ac86e1de82ea0c6bbffe8f8d245a1cec679d8c7b551 not found: ID does not exist" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232218 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9lcrs"] Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232497 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232514 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232525 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232535 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232545 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232553 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232567 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232575 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232585 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232591 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232600 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="extract-utilities" Nov 11 13:43:12 crc 
kubenswrapper[4842]: I1111 13:43:12.232605 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232615 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232620 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232628 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232634 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232643 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232649 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232657 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232662 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232670 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232677 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="extract-utilities" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232687 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232693 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232702 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232707 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="extract-content" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232716 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232724 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232732 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce99883b-0503-48f7-9711-c7caaa523a00" 
containerName="marketplace-operator" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232739 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce99883b-0503-48f7-9711-c7caaa523a00" containerName="marketplace-operator" Nov 11 13:43:12 crc kubenswrapper[4842]: E1111 13:43:12.232746 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232752 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232844 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd55de82-0278-4627-870c-9b09edbca3d5" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232856 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e157548-8425-433b-a5ed-394a662310d8" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232868 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce99883b-0503-48f7-9711-c7caaa523a00" containerName="marketplace-operator" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232875 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b47536f-3d40-4117-bb3c-b7751e6bcc16" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232883 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa5e095a-f74a-470d-9070-769a25b3299d" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.232891 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3601123-3a84-4cc7-9b1a-2b56cffb00a6" containerName="registry-server" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.233665 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.235573 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.237484 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9lcrs"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.361963 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c85f60b-9964-4e28-a20c-bc21b4bc9680-catalog-content\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.362788 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79z2x\" (UniqueName: \"kubernetes.io/projected/7c85f60b-9964-4e28-a20c-bc21b4bc9680-kube-api-access-79z2x\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.363030 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c85f60b-9964-4e28-a20c-bc21b4bc9680-utilities\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.427621 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fhxng"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.428815 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.431409 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fhxng"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.431788 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.464538 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c85f60b-9964-4e28-a20c-bc21b4bc9680-catalog-content\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.464714 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79z2x\" (UniqueName: \"kubernetes.io/projected/7c85f60b-9964-4e28-a20c-bc21b4bc9680-kube-api-access-79z2x\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.464766 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c85f60b-9964-4e28-a20c-bc21b4bc9680-utilities\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.465117 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c85f60b-9964-4e28-a20c-bc21b4bc9680-catalog-content\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.465522 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c85f60b-9964-4e28-a20c-bc21b4bc9680-utilities\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.480410 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79z2x\" (UniqueName: \"kubernetes.io/projected/7c85f60b-9964-4e28-a20c-bc21b4bc9680-kube-api-access-79z2x\") pod \"community-operators-9lcrs\" (UID: \"7c85f60b-9964-4e28-a20c-bc21b4bc9680\") " pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.549390 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.566055 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1110211a-8e95-489f-ab1c-f13f4ca75b79-utilities\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.566149 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1110211a-8e95-489f-ab1c-f13f4ca75b79-catalog-content\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.566173 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2ncz\" (UniqueName: \"kubernetes.io/projected/1110211a-8e95-489f-ab1c-f13f4ca75b79-kube-api-access-k2ncz\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.635854 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sxbg6"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.637214 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.649889 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sxbg6"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.667363 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1110211a-8e95-489f-ab1c-f13f4ca75b79-catalog-content\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.667402 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2ncz\" (UniqueName: \"kubernetes.io/projected/1110211a-8e95-489f-ab1c-f13f4ca75b79-kube-api-access-k2ncz\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.667455 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1110211a-8e95-489f-ab1c-f13f4ca75b79-utilities\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.668581 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1110211a-8e95-489f-ab1c-f13f4ca75b79-utilities\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.668622 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1110211a-8e95-489f-ab1c-f13f4ca75b79-catalog-content\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.692450 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2ncz\" (UniqueName: \"kubernetes.io/projected/1110211a-8e95-489f-ab1c-f13f4ca75b79-kube-api-access-k2ncz\") pod \"redhat-marketplace-fhxng\" (UID: \"1110211a-8e95-489f-ab1c-f13f4ca75b79\") " pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.739087 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9lcrs"] Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.743675 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:12 crc kubenswrapper[4842]: W1111 13:43:12.745596 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c85f60b_9964_4e28_a20c_bc21b4bc9680.slice/crio-b89456705263966fa07a97fb91ff8adccfd9bf13540061cd942b3ebd732f6492 WatchSource:0}: Error finding container b89456705263966fa07a97fb91ff8adccfd9bf13540061cd942b3ebd732f6492: Status 404 returned error can't find the container with id b89456705263966fa07a97fb91ff8adccfd9bf13540061cd942b3ebd732f6492 Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.768945 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpgcg\" (UniqueName: \"kubernetes.io/projected/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-kube-api-access-zpgcg\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.769012 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-catalog-content\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.769030 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-utilities\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.870395 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpgcg\" (UniqueName: \"kubernetes.io/projected/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-kube-api-access-zpgcg\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.870471 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-catalog-content\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " 
pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.870497 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-utilities\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.870962 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-utilities\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.871044 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-catalog-content\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.890717 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpgcg\" (UniqueName: \"kubernetes.io/projected/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-kube-api-access-zpgcg\") pod \"community-operators-sxbg6\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:12 crc kubenswrapper[4842]: I1111 13:43:12.959775 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:13 crc kubenswrapper[4842]: I1111 13:43:13.108619 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fhxng"] Nov 11 13:43:13 crc kubenswrapper[4842]: W1111 13:43:13.111957 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1110211a_8e95_489f_ab1c_f13f4ca75b79.slice/crio-32cb5b2acb14b0e866bcb9825edfd47bfa0ca2611b0c7040ac2c237314cb78e8 WatchSource:0}: Error finding container 32cb5b2acb14b0e866bcb9825edfd47bfa0ca2611b0c7040ac2c237314cb78e8: Status 404 returned error can't find the container with id 32cb5b2acb14b0e866bcb9825edfd47bfa0ca2611b0c7040ac2c237314cb78e8 Nov 11 13:43:13 crc kubenswrapper[4842]: I1111 13:43:13.145241 4842 generic.go:334] "Generic (PLEG): container finished" podID="7c85f60b-9964-4e28-a20c-bc21b4bc9680" containerID="633ad31bf303e598b1d4332241edb9a648699c2e7dc7332ebb8e9b92d9a6425a" exitCode=0 Nov 11 13:43:13 crc kubenswrapper[4842]: I1111 13:43:13.145288 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lcrs" event={"ID":"7c85f60b-9964-4e28-a20c-bc21b4bc9680","Type":"ContainerDied","Data":"633ad31bf303e598b1d4332241edb9a648699c2e7dc7332ebb8e9b92d9a6425a"} Nov 11 13:43:13 crc kubenswrapper[4842]: I1111 13:43:13.145326 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lcrs" event={"ID":"7c85f60b-9964-4e28-a20c-bc21b4bc9680","Type":"ContainerStarted","Data":"b89456705263966fa07a97fb91ff8adccfd9bf13540061cd942b3ebd732f6492"} Nov 11 13:43:13 crc kubenswrapper[4842]: I1111 13:43:13.150203 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-fhxng" event={"ID":"1110211a-8e95-489f-ab1c-f13f4ca75b79","Type":"ContainerStarted","Data":"32cb5b2acb14b0e866bcb9825edfd47bfa0ca2611b0c7040ac2c237314cb78e8"} Nov 11 13:43:13 crc kubenswrapper[4842]: I1111 13:43:13.315878 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sxbg6"] Nov 11 13:43:13 crc kubenswrapper[4842]: W1111 13:43:13.321731 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea04edc6_5e8b_4eb9_a6e4_8e4cf79d60a1.slice/crio-45b418ddf28969ef6a385789348378e7ced034e8486d12417975a3eca416a94a WatchSource:0}: Error finding container 45b418ddf28969ef6a385789348378e7ced034e8486d12417975a3eca416a94a: Status 404 returned error can't find the container with id 45b418ddf28969ef6a385789348378e7ced034e8486d12417975a3eca416a94a Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.066019 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e157548-8425-433b-a5ed-394a662310d8" path="/var/lib/kubelet/pods/7e157548-8425-433b-a5ed-394a662310d8/volumes" Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.158541 4842 generic.go:334] "Generic (PLEG): container finished" podID="7c85f60b-9964-4e28-a20c-bc21b4bc9680" containerID="2d14f8c1789db0fd2b6d7f0c866fe1afb8872bedeff89f60df9229fead3fd338" exitCode=0 Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.158638 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lcrs" event={"ID":"7c85f60b-9964-4e28-a20c-bc21b4bc9680","Type":"ContainerDied","Data":"2d14f8c1789db0fd2b6d7f0c866fe1afb8872bedeff89f60df9229fead3fd338"} Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.162216 4842 generic.go:334] "Generic (PLEG): container finished" podID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerID="526777ec7a1d29838713e3a9f2f30984348ca05729d6fd794c74c26976b80fd6" exitCode=0 Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.162292 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sxbg6" event={"ID":"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1","Type":"ContainerDied","Data":"526777ec7a1d29838713e3a9f2f30984348ca05729d6fd794c74c26976b80fd6"} Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.162396 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sxbg6" event={"ID":"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1","Type":"ContainerStarted","Data":"45b418ddf28969ef6a385789348378e7ced034e8486d12417975a3eca416a94a"} Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.164440 4842 generic.go:334] "Generic (PLEG): container finished" podID="1110211a-8e95-489f-ab1c-f13f4ca75b79" containerID="82dbd605edc73f27b73d0aad06fdca656b7d824866c97de5ac23d91d8a89045e" exitCode=0 Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.164497 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fhxng" event={"ID":"1110211a-8e95-489f-ab1c-f13f4ca75b79","Type":"ContainerDied","Data":"82dbd605edc73f27b73d0aad06fdca656b7d824866c97de5ac23d91d8a89045e"} Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.824628 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6wlm5"] Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.825860 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.828540 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.833090 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6wlm5"] Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.960880 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.960943 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.996605 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-utilities\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.996656 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-catalog-content\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:14 crc kubenswrapper[4842]: I1111 13:43:14.996689 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmpwk\" (UniqueName: \"kubernetes.io/projected/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-kube-api-access-rmpwk\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.020784 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wzjr7"] Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.021962 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.024212 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.034893 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wzjr7"] Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.097964 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmpwk\" (UniqueName: \"kubernetes.io/projected/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-kube-api-access-rmpwk\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.098354 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-utilities\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.098384 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-catalog-content\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.098815 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-catalog-content\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.098908 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-utilities\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.116798 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmpwk\" (UniqueName: \"kubernetes.io/projected/ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3-kube-api-access-rmpwk\") pod \"redhat-operators-6wlm5\" (UID: \"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3\") " pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.157242 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.170455 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fhxng" event={"ID":"1110211a-8e95-489f-ab1c-f13f4ca75b79","Type":"ContainerStarted","Data":"5417eb1a81f5684116c4e892aabd26d21938f9cbaf0c421903da87e3320def9d"} Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.174547 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lcrs" event={"ID":"7c85f60b-9964-4e28-a20c-bc21b4bc9680","Type":"ContainerStarted","Data":"2502fd9417a5e579ca786e6a93f040ca0dce3b5e0814853b3208a9a3459945cd"} Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.200492 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-utilities\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.200579 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-catalog-content\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.200653 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjg99\" (UniqueName: \"kubernetes.io/projected/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-kube-api-access-bjg99\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.209296 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9lcrs" podStartSLOduration=1.661569406 podStartE2EDuration="3.209276617s" podCreationTimestamp="2025-11-11 13:43:12 +0000 UTC" firstStartedPulling="2025-11-11 13:43:13.148702406 +0000 UTC m=+803.808992025" lastFinishedPulling="2025-11-11 13:43:14.696409617 +0000 UTC m=+805.356699236" observedRunningTime="2025-11-11 13:43:15.206020735 +0000 UTC m=+805.866310354" watchObservedRunningTime="2025-11-11 13:43:15.209276617 +0000 UTC m=+805.869566236" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.225825 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gr79v"] Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.227594 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.264951 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gr79v"] Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.303128 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjg99\" (UniqueName: \"kubernetes.io/projected/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-kube-api-access-bjg99\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.303222 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-utilities\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.303271 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-catalog-content\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.304538 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-catalog-content\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.304610 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-utilities\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.326315 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjg99\" (UniqueName: \"kubernetes.io/projected/f0671ff4-7fd5-4886-82ed-cd0ca45f39d1-kube-api-access-bjg99\") pod \"certified-operators-wzjr7\" (UID: \"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1\") " pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.341635 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.404749 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-catalog-content\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.404828 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-utilities\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.405013 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svjrl\" (UniqueName: \"kubernetes.io/projected/8302a976-1832-48d3-a933-dac168ef853a-kube-api-access-svjrl\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.506077 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-utilities\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.506165 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svjrl\" (UniqueName: \"kubernetes.io/projected/8302a976-1832-48d3-a933-dac168ef853a-kube-api-access-svjrl\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.506197 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-catalog-content\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.506624 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-catalog-content\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.506829 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-utilities\") pod \"redhat-operators-gr79v\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.523803 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svjrl\" (UniqueName: \"kubernetes.io/projected/8302a976-1832-48d3-a933-dac168ef853a-kube-api-access-svjrl\") pod \"redhat-operators-gr79v\" (UID: 
\"8302a976-1832-48d3-a933-dac168ef853a\") " pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.579887 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6wlm5"] Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.593271 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.729953 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wzjr7"] Nov 11 13:43:15 crc kubenswrapper[4842]: W1111 13:43:15.788922 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0671ff4_7fd5_4886_82ed_cd0ca45f39d1.slice/crio-ae38f9c90e0562534066802b64f0470e14bf5b4fe71d163e1f921875b1dbe1e2 WatchSource:0}: Error finding container ae38f9c90e0562534066802b64f0470e14bf5b4fe71d163e1f921875b1dbe1e2: Status 404 returned error can't find the container with id ae38f9c90e0562534066802b64f0470e14bf5b4fe71d163e1f921875b1dbe1e2 Nov 11 13:43:15 crc kubenswrapper[4842]: I1111 13:43:15.998262 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gr79v"] Nov 11 13:43:16 crc kubenswrapper[4842]: W1111 13:43:16.007257 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8302a976_1832_48d3_a933_dac168ef853a.slice/crio-7a4f0a3a21d8c328f830e3c688d1826428377795ec7d339f420989ca62a584ef WatchSource:0}: Error finding container 7a4f0a3a21d8c328f830e3c688d1826428377795ec7d339f420989ca62a584ef: Status 404 returned error can't find the container with id 7a4f0a3a21d8c328f830e3c688d1826428377795ec7d339f420989ca62a584ef Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.180876 4842 generic.go:334] "Generic (PLEG): container finished" podID="ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3" containerID="3925646f8b1835fbc98bdf78fe30a092d62487170ea6faf47f942ea4238a91ee" exitCode=0 Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.181136 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6wlm5" event={"ID":"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3","Type":"ContainerDied","Data":"3925646f8b1835fbc98bdf78fe30a092d62487170ea6faf47f942ea4238a91ee"} Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.181267 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6wlm5" event={"ID":"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3","Type":"ContainerStarted","Data":"da65b8dd137e59534613d808dd95479c97214441aa4f021a33a51647bc8daa8d"} Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.183401 4842 generic.go:334] "Generic (PLEG): container finished" podID="1110211a-8e95-489f-ab1c-f13f4ca75b79" containerID="5417eb1a81f5684116c4e892aabd26d21938f9cbaf0c421903da87e3320def9d" exitCode=0 Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.183462 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fhxng" event={"ID":"1110211a-8e95-489f-ab1c-f13f4ca75b79","Type":"ContainerDied","Data":"5417eb1a81f5684116c4e892aabd26d21938f9cbaf0c421903da87e3320def9d"} Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.185050 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gr79v" 
event={"ID":"8302a976-1832-48d3-a933-dac168ef853a","Type":"ContainerStarted","Data":"7a4f0a3a21d8c328f830e3c688d1826428377795ec7d339f420989ca62a584ef"} Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.187860 4842 generic.go:334] "Generic (PLEG): container finished" podID="f0671ff4-7fd5-4886-82ed-cd0ca45f39d1" containerID="b9acc06a3ba46f8cfb76cf17e64534b5b2881970a2eb9a80e10c7755290fd82c" exitCode=0 Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.187994 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzjr7" event={"ID":"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1","Type":"ContainerDied","Data":"b9acc06a3ba46f8cfb76cf17e64534b5b2881970a2eb9a80e10c7755290fd82c"} Nov 11 13:43:16 crc kubenswrapper[4842]: I1111 13:43:16.188043 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzjr7" event={"ID":"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1","Type":"ContainerStarted","Data":"ae38f9c90e0562534066802b64f0470e14bf5b4fe71d163e1f921875b1dbe1e2"} Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.195092 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6wlm5" event={"ID":"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3","Type":"ContainerStarted","Data":"9590fb7ea53ac47a28f72d214c3bbc3d52e1786600a4ea384d73cfce733feea0"} Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.197439 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fhxng" event={"ID":"1110211a-8e95-489f-ab1c-f13f4ca75b79","Type":"ContainerStarted","Data":"47d5a24f7af848d71f097e0f98ca0f104028bccc2db62e9b061e9ebcd1e008b6"} Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.198904 4842 generic.go:334] "Generic (PLEG): container finished" podID="8302a976-1832-48d3-a933-dac168ef853a" containerID="5cca196173970d72c5b5c85d21e108d9ca8268a47c60c83c26ae56205fbe4f99" exitCode=0 Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.198996 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gr79v" event={"ID":"8302a976-1832-48d3-a933-dac168ef853a","Type":"ContainerDied","Data":"5cca196173970d72c5b5c85d21e108d9ca8268a47c60c83c26ae56205fbe4f99"} Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.203761 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzjr7" event={"ID":"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1","Type":"ContainerStarted","Data":"77fc38fed1dd1cabb4cd0aca9739480f27fd22ca04aec9a93655987cdca9efae"} Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.207140 4842 generic.go:334] "Generic (PLEG): container finished" podID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerID="a53d93bf3468a575bbe6ec44ffe1b27677385ccade27d3d4492627202f419d86" exitCode=0 Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.207186 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sxbg6" event={"ID":"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1","Type":"ContainerDied","Data":"a53d93bf3468a575bbe6ec44ffe1b27677385ccade27d3d4492627202f419d86"} Nov 11 13:43:17 crc kubenswrapper[4842]: I1111 13:43:17.245890 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fhxng" podStartSLOduration=2.642854258 podStartE2EDuration="5.245866068s" podCreationTimestamp="2025-11-11 13:43:12 +0000 UTC" firstStartedPulling="2025-11-11 13:43:14.172880768 +0000 UTC 
m=+804.833170387" lastFinishedPulling="2025-11-11 13:43:16.775892578 +0000 UTC m=+807.436182197" observedRunningTime="2025-11-11 13:43:17.243180063 +0000 UTC m=+807.903469702" watchObservedRunningTime="2025-11-11 13:43:17.245866068 +0000 UTC m=+807.906155707" Nov 11 13:43:18 crc kubenswrapper[4842]: I1111 13:43:18.214383 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gr79v" event={"ID":"8302a976-1832-48d3-a933-dac168ef853a","Type":"ContainerStarted","Data":"e9fbf813122ecd9116ec4bbaf7d659c6787819337a8ef9117e620e65484c17dc"} Nov 11 13:43:18 crc kubenswrapper[4842]: I1111 13:43:18.216322 4842 generic.go:334] "Generic (PLEG): container finished" podID="f0671ff4-7fd5-4886-82ed-cd0ca45f39d1" containerID="77fc38fed1dd1cabb4cd0aca9739480f27fd22ca04aec9a93655987cdca9efae" exitCode=0 Nov 11 13:43:18 crc kubenswrapper[4842]: I1111 13:43:18.216408 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzjr7" event={"ID":"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1","Type":"ContainerDied","Data":"77fc38fed1dd1cabb4cd0aca9739480f27fd22ca04aec9a93655987cdca9efae"} Nov 11 13:43:18 crc kubenswrapper[4842]: I1111 13:43:18.218617 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sxbg6" event={"ID":"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1","Type":"ContainerStarted","Data":"be4f63a417c13804013320eb1fb2d8285e95909e1f9e0ffe7fc32e0b406d4f7c"} Nov 11 13:43:18 crc kubenswrapper[4842]: I1111 13:43:18.222085 4842 generic.go:334] "Generic (PLEG): container finished" podID="ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3" containerID="9590fb7ea53ac47a28f72d214c3bbc3d52e1786600a4ea384d73cfce733feea0" exitCode=0 Nov 11 13:43:18 crc kubenswrapper[4842]: I1111 13:43:18.222199 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6wlm5" event={"ID":"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3","Type":"ContainerDied","Data":"9590fb7ea53ac47a28f72d214c3bbc3d52e1786600a4ea384d73cfce733feea0"} Nov 11 13:43:18 crc kubenswrapper[4842]: I1111 13:43:18.251911 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sxbg6" podStartSLOduration=2.647886159 podStartE2EDuration="6.251893325s" podCreationTimestamp="2025-11-11 13:43:12 +0000 UTC" firstStartedPulling="2025-11-11 13:43:14.164332737 +0000 UTC m=+804.824622376" lastFinishedPulling="2025-11-11 13:43:17.768339923 +0000 UTC m=+808.428629542" observedRunningTime="2025-11-11 13:43:18.250003935 +0000 UTC m=+808.910293554" watchObservedRunningTime="2025-11-11 13:43:18.251893325 +0000 UTC m=+808.912182944" Nov 11 13:43:19 crc kubenswrapper[4842]: I1111 13:43:19.233188 4842 generic.go:334] "Generic (PLEG): container finished" podID="8302a976-1832-48d3-a933-dac168ef853a" containerID="e9fbf813122ecd9116ec4bbaf7d659c6787819337a8ef9117e620e65484c17dc" exitCode=0 Nov 11 13:43:19 crc kubenswrapper[4842]: I1111 13:43:19.233244 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gr79v" event={"ID":"8302a976-1832-48d3-a933-dac168ef853a","Type":"ContainerDied","Data":"e9fbf813122ecd9116ec4bbaf7d659c6787819337a8ef9117e620e65484c17dc"} Nov 11 13:43:19 crc kubenswrapper[4842]: I1111 13:43:19.237236 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzjr7" 
event={"ID":"f0671ff4-7fd5-4886-82ed-cd0ca45f39d1","Type":"ContainerStarted","Data":"63b7cb053bae1aace2f7ec9a3296b31f8f982bb7d1e5e24955c85d817185098d"} Nov 11 13:43:19 crc kubenswrapper[4842]: I1111 13:43:19.240160 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6wlm5" event={"ID":"ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3","Type":"ContainerStarted","Data":"0f7b32defc71846013f016c92dfc5f14d39fad667ffeb3ea1a85e33e7c09f010"} Nov 11 13:43:19 crc kubenswrapper[4842]: I1111 13:43:19.275041 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6wlm5" podStartSLOduration=2.816841805 podStartE2EDuration="5.275022253s" podCreationTimestamp="2025-11-11 13:43:14 +0000 UTC" firstStartedPulling="2025-11-11 13:43:16.199840754 +0000 UTC m=+806.860130373" lastFinishedPulling="2025-11-11 13:43:18.658021212 +0000 UTC m=+809.318310821" observedRunningTime="2025-11-11 13:43:19.272969188 +0000 UTC m=+809.933258807" watchObservedRunningTime="2025-11-11 13:43:19.275022253 +0000 UTC m=+809.935311872" Nov 11 13:43:20 crc kubenswrapper[4842]: I1111 13:43:20.246999 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gr79v" event={"ID":"8302a976-1832-48d3-a933-dac168ef853a","Type":"ContainerStarted","Data":"f85d2afd953a84530a120f5a38759b13b4489ff3bb93e3ad47d5611c3d3d0e80"} Nov 11 13:43:20 crc kubenswrapper[4842]: I1111 13:43:20.271632 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wzjr7" podStartSLOduration=2.74934403 podStartE2EDuration="5.27160971s" podCreationTimestamp="2025-11-11 13:43:15 +0000 UTC" firstStartedPulling="2025-11-11 13:43:16.211490883 +0000 UTC m=+806.871780502" lastFinishedPulling="2025-11-11 13:43:18.733756563 +0000 UTC m=+809.394046182" observedRunningTime="2025-11-11 13:43:19.287236221 +0000 UTC m=+809.947525860" watchObservedRunningTime="2025-11-11 13:43:20.27160971 +0000 UTC m=+810.931899329" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.550655 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.551016 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.589962 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.613677 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gr79v" podStartSLOduration=5.166260571 podStartE2EDuration="7.613655886s" podCreationTimestamp="2025-11-11 13:43:15 +0000 UTC" firstStartedPulling="2025-11-11 13:43:17.202197344 +0000 UTC m=+807.862486963" lastFinishedPulling="2025-11-11 13:43:19.649592659 +0000 UTC m=+810.309882278" observedRunningTime="2025-11-11 13:43:20.272815559 +0000 UTC m=+810.933105188" watchObservedRunningTime="2025-11-11 13:43:22.613655886 +0000 UTC m=+813.273945505" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.745044 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.745090 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.791395 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.960625 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.960927 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:22 crc kubenswrapper[4842]: I1111 13:43:22.998240 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:23 crc kubenswrapper[4842]: I1111 13:43:23.299836 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:23 crc kubenswrapper[4842]: I1111 13:43:23.310720 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fhxng" Nov 11 13:43:23 crc kubenswrapper[4842]: I1111 13:43:23.320384 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9lcrs" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.157540 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.157692 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.214970 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.306012 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6wlm5" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.342483 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.342561 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.385164 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.412764 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sxbg6"] Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.593427 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.593712 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:25 crc kubenswrapper[4842]: I1111 13:43:25.646340 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:26 crc kubenswrapper[4842]: I1111 13:43:26.278000 4842 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/community-operators-sxbg6" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="registry-server" containerID="cri-o://be4f63a417c13804013320eb1fb2d8285e95909e1f9e0ffe7fc32e0b406d4f7c" gracePeriod=2 Nov 11 13:43:26 crc kubenswrapper[4842]: I1111 13:43:26.316055 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:26 crc kubenswrapper[4842]: I1111 13:43:26.331585 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wzjr7" Nov 11 13:43:27 crc kubenswrapper[4842]: I1111 13:43:27.817304 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gr79v"] Nov 11 13:43:29 crc kubenswrapper[4842]: I1111 13:43:29.292161 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gr79v" podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="registry-server" containerID="cri-o://f85d2afd953a84530a120f5a38759b13b4489ff3bb93e3ad47d5611c3d3d0e80" gracePeriod=2 Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.313138 4842 generic.go:334] "Generic (PLEG): container finished" podID="8302a976-1832-48d3-a933-dac168ef853a" containerID="f85d2afd953a84530a120f5a38759b13b4489ff3bb93e3ad47d5611c3d3d0e80" exitCode=0 Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.313249 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gr79v" event={"ID":"8302a976-1832-48d3-a933-dac168ef853a","Type":"ContainerDied","Data":"f85d2afd953a84530a120f5a38759b13b4489ff3bb93e3ad47d5611c3d3d0e80"} Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.315512 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sxbg6_ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1/registry-server/0.log" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.316137 4842 generic.go:334] "Generic (PLEG): container finished" podID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerID="be4f63a417c13804013320eb1fb2d8285e95909e1f9e0ffe7fc32e0b406d4f7c" exitCode=137 Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.316184 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sxbg6" event={"ID":"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1","Type":"ContainerDied","Data":"be4f63a417c13804013320eb1fb2d8285e95909e1f9e0ffe7fc32e0b406d4f7c"} Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.470736 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sxbg6_ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1/registry-server/0.log" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.471607 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.544597 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.587589 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-utilities\") pod \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.587689 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpgcg\" (UniqueName: \"kubernetes.io/projected/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-kube-api-access-zpgcg\") pod \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.587763 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-utilities\") pod \"8302a976-1832-48d3-a933-dac168ef853a\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.587913 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-catalog-content\") pod \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\" (UID: \"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1\") " Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.587950 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-catalog-content\") pod \"8302a976-1832-48d3-a933-dac168ef853a\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.588024 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svjrl\" (UniqueName: \"kubernetes.io/projected/8302a976-1832-48d3-a933-dac168ef853a-kube-api-access-svjrl\") pod \"8302a976-1832-48d3-a933-dac168ef853a\" (UID: \"8302a976-1832-48d3-a933-dac168ef853a\") " Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.588468 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-utilities" (OuterVolumeSpecName: "utilities") pod "ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" (UID: "ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.588526 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-utilities" (OuterVolumeSpecName: "utilities") pod "8302a976-1832-48d3-a933-dac168ef853a" (UID: "8302a976-1832-48d3-a933-dac168ef853a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.588956 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.588978 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.594312 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-kube-api-access-zpgcg" (OuterVolumeSpecName: "kube-api-access-zpgcg") pod "ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" (UID: "ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1"). InnerVolumeSpecName "kube-api-access-zpgcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.600769 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8302a976-1832-48d3-a933-dac168ef853a-kube-api-access-svjrl" (OuterVolumeSpecName: "kube-api-access-svjrl") pod "8302a976-1832-48d3-a933-dac168ef853a" (UID: "8302a976-1832-48d3-a933-dac168ef853a"). InnerVolumeSpecName "kube-api-access-svjrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.647426 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" (UID: "ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.692043 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svjrl\" (UniqueName: \"kubernetes.io/projected/8302a976-1832-48d3-a933-dac168ef853a-kube-api-access-svjrl\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.692307 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpgcg\" (UniqueName: \"kubernetes.io/projected/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-kube-api-access-zpgcg\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.692393 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.725213 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8302a976-1832-48d3-a933-dac168ef853a" (UID: "8302a976-1832-48d3-a933-dac168ef853a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:43:30 crc kubenswrapper[4842]: I1111 13:43:30.793565 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8302a976-1832-48d3-a933-dac168ef853a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.323849 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sxbg6_ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1/registry-server/0.log" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.325404 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sxbg6" event={"ID":"ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1","Type":"ContainerDied","Data":"45b418ddf28969ef6a385789348378e7ced034e8486d12417975a3eca416a94a"} Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.325438 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sxbg6" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.325473 4842 scope.go:117] "RemoveContainer" containerID="be4f63a417c13804013320eb1fb2d8285e95909e1f9e0ffe7fc32e0b406d4f7c" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.332993 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gr79v" event={"ID":"8302a976-1832-48d3-a933-dac168ef853a","Type":"ContainerDied","Data":"7a4f0a3a21d8c328f830e3c688d1826428377795ec7d339f420989ca62a584ef"} Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.333076 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gr79v" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.346573 4842 scope.go:117] "RemoveContainer" containerID="a53d93bf3468a575bbe6ec44ffe1b27677385ccade27d3d4492627202f419d86" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.373419 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sxbg6"] Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.376338 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sxbg6"] Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.385360 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gr79v"] Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.387886 4842 scope.go:117] "RemoveContainer" containerID="526777ec7a1d29838713e3a9f2f30984348ca05729d6fd794c74c26976b80fd6" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.389872 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gr79v"] Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.401691 4842 scope.go:117] "RemoveContainer" containerID="f85d2afd953a84530a120f5a38759b13b4489ff3bb93e3ad47d5611c3d3d0e80" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.425682 4842 scope.go:117] "RemoveContainer" containerID="e9fbf813122ecd9116ec4bbaf7d659c6787819337a8ef9117e620e65484c17dc" Nov 11 13:43:31 crc kubenswrapper[4842]: I1111 13:43:31.446871 4842 scope.go:117] "RemoveContainer" containerID="5cca196173970d72c5b5c85d21e108d9ca8268a47c60c83c26ae56205fbe4f99" Nov 11 13:43:32 crc kubenswrapper[4842]: I1111 13:43:32.066021 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8302a976-1832-48d3-a933-dac168ef853a" 
path="/var/lib/kubelet/pods/8302a976-1832-48d3-a933-dac168ef853a/volumes" Nov 11 13:43:32 crc kubenswrapper[4842]: I1111 13:43:32.066651 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" path="/var/lib/kubelet/pods/ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1/volumes" Nov 11 13:43:44 crc kubenswrapper[4842]: I1111 13:43:44.961615 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:43:44 crc kubenswrapper[4842]: I1111 13:43:44.962223 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:43:44 crc kubenswrapper[4842]: I1111 13:43:44.962280 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:43:44 crc kubenswrapper[4842]: I1111 13:43:44.962975 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bb8a87fd6e5dfffa2ad7daeb56800fbec36b7bea68de40f21af5fdc7ae975192"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 13:43:44 crc kubenswrapper[4842]: I1111 13:43:44.963041 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://bb8a87fd6e5dfffa2ad7daeb56800fbec36b7bea68de40f21af5fdc7ae975192" gracePeriod=600 Nov 11 13:43:45 crc kubenswrapper[4842]: I1111 13:43:45.408857 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="bb8a87fd6e5dfffa2ad7daeb56800fbec36b7bea68de40f21af5fdc7ae975192" exitCode=0 Nov 11 13:43:45 crc kubenswrapper[4842]: I1111 13:43:45.408911 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"bb8a87fd6e5dfffa2ad7daeb56800fbec36b7bea68de40f21af5fdc7ae975192"} Nov 11 13:43:45 crc kubenswrapper[4842]: I1111 13:43:45.409219 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"aa452f419e27a8505361953da4ee9d75039922c9b9553cf1ba73a84a5e1125e8"} Nov 11 13:43:45 crc kubenswrapper[4842]: I1111 13:43:45.409239 4842 scope.go:117] "RemoveContainer" containerID="c1ca4374e95caaa596e3c81b45f2a5043729a278a200eab109186bc210767a07" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.134424 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m"] Nov 11 13:45:00 crc kubenswrapper[4842]: E1111 13:45:00.139984 4842 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="extract-content" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140034 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="extract-content" Nov 11 13:45:00 crc kubenswrapper[4842]: E1111 13:45:00.140051 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="extract-content" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140059 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="extract-content" Nov 11 13:45:00 crc kubenswrapper[4842]: E1111 13:45:00.140072 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="registry-server" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140081 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="registry-server" Nov 11 13:45:00 crc kubenswrapper[4842]: E1111 13:45:00.140103 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="extract-utilities" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140111 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="extract-utilities" Nov 11 13:45:00 crc kubenswrapper[4842]: E1111 13:45:00.140145 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="extract-utilities" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140154 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="extract-utilities" Nov 11 13:45:00 crc kubenswrapper[4842]: E1111 13:45:00.140163 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="registry-server" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140170 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="registry-server" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140297 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea04edc6-5e8b-4eb9-a6e4-8e4cf79d60a1" containerName="registry-server" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140312 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8302a976-1832-48d3-a933-dac168ef853a" containerName="registry-server" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.140916 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.142294 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m"] Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.143636 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.143718 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.210098 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20224b1f-f299-41c4-93b0-bea090f9c3cb-config-volume\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.210197 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trzcj\" (UniqueName: \"kubernetes.io/projected/20224b1f-f299-41c4-93b0-bea090f9c3cb-kube-api-access-trzcj\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.210281 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20224b1f-f299-41c4-93b0-bea090f9c3cb-secret-volume\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.311515 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20224b1f-f299-41c4-93b0-bea090f9c3cb-secret-volume\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.311681 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20224b1f-f299-41c4-93b0-bea090f9c3cb-config-volume\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.311747 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trzcj\" (UniqueName: \"kubernetes.io/projected/20224b1f-f299-41c4-93b0-bea090f9c3cb-kube-api-access-trzcj\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.312873 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20224b1f-f299-41c4-93b0-bea090f9c3cb-config-volume\") pod 
\"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.319273 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20224b1f-f299-41c4-93b0-bea090f9c3cb-secret-volume\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.329363 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trzcj\" (UniqueName: \"kubernetes.io/projected/20224b1f-f299-41c4-93b0-bea090f9c3cb-kube-api-access-trzcj\") pod \"collect-profiles-29381145-4hq4m\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.462391 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.630545 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m"] Nov 11 13:45:00 crc kubenswrapper[4842]: W1111 13:45:00.636508 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20224b1f_f299_41c4_93b0_bea090f9c3cb.slice/crio-9c00eb8cb37057c9b948172b8919b8a71967eb408a69d7acd751935a31349115 WatchSource:0}: Error finding container 9c00eb8cb37057c9b948172b8919b8a71967eb408a69d7acd751935a31349115: Status 404 returned error can't find the container with id 9c00eb8cb37057c9b948172b8919b8a71967eb408a69d7acd751935a31349115 Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.848275 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" event={"ID":"20224b1f-f299-41c4-93b0-bea090f9c3cb","Type":"ContainerStarted","Data":"60ee5d6aecbd428352a58f85820416e81f538a71580b78115e53e6aa4bb7d844"} Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.848316 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" event={"ID":"20224b1f-f299-41c4-93b0-bea090f9c3cb","Type":"ContainerStarted","Data":"9c00eb8cb37057c9b948172b8919b8a71967eb408a69d7acd751935a31349115"} Nov 11 13:45:00 crc kubenswrapper[4842]: I1111 13:45:00.861642 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" podStartSLOduration=0.861626192 podStartE2EDuration="861.626192ms" podCreationTimestamp="2025-11-11 13:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:45:00.859299678 +0000 UTC m=+911.519589297" watchObservedRunningTime="2025-11-11 13:45:00.861626192 +0000 UTC m=+911.521915811" Nov 11 13:45:01 crc kubenswrapper[4842]: I1111 13:45:01.856658 4842 generic.go:334] "Generic (PLEG): container finished" podID="20224b1f-f299-41c4-93b0-bea090f9c3cb" containerID="60ee5d6aecbd428352a58f85820416e81f538a71580b78115e53e6aa4bb7d844" exitCode=0 Nov 11 13:45:01 crc kubenswrapper[4842]: I1111 13:45:01.856697 
4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" event={"ID":"20224b1f-f299-41c4-93b0-bea090f9c3cb","Type":"ContainerDied","Data":"60ee5d6aecbd428352a58f85820416e81f538a71580b78115e53e6aa4bb7d844"} Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.047300 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.151295 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trzcj\" (UniqueName: \"kubernetes.io/projected/20224b1f-f299-41c4-93b0-bea090f9c3cb-kube-api-access-trzcj\") pod \"20224b1f-f299-41c4-93b0-bea090f9c3cb\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.151420 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20224b1f-f299-41c4-93b0-bea090f9c3cb-secret-volume\") pod \"20224b1f-f299-41c4-93b0-bea090f9c3cb\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.151446 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20224b1f-f299-41c4-93b0-bea090f9c3cb-config-volume\") pod \"20224b1f-f299-41c4-93b0-bea090f9c3cb\" (UID: \"20224b1f-f299-41c4-93b0-bea090f9c3cb\") " Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.153688 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20224b1f-f299-41c4-93b0-bea090f9c3cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "20224b1f-f299-41c4-93b0-bea090f9c3cb" (UID: "20224b1f-f299-41c4-93b0-bea090f9c3cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.160330 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20224b1f-f299-41c4-93b0-bea090f9c3cb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "20224b1f-f299-41c4-93b0-bea090f9c3cb" (UID: "20224b1f-f299-41c4-93b0-bea090f9c3cb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.160376 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20224b1f-f299-41c4-93b0-bea090f9c3cb-kube-api-access-trzcj" (OuterVolumeSpecName: "kube-api-access-trzcj") pod "20224b1f-f299-41c4-93b0-bea090f9c3cb" (UID: "20224b1f-f299-41c4-93b0-bea090f9c3cb"). InnerVolumeSpecName "kube-api-access-trzcj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.252907 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trzcj\" (UniqueName: \"kubernetes.io/projected/20224b1f-f299-41c4-93b0-bea090f9c3cb-kube-api-access-trzcj\") on node \"crc\" DevicePath \"\"" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.252945 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/20224b1f-f299-41c4-93b0-bea090f9c3cb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.252953 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/20224b1f-f299-41c4-93b0-bea090f9c3cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.867994 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" event={"ID":"20224b1f-f299-41c4-93b0-bea090f9c3cb","Type":"ContainerDied","Data":"9c00eb8cb37057c9b948172b8919b8a71967eb408a69d7acd751935a31349115"} Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.868038 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c00eb8cb37057c9b948172b8919b8a71967eb408a69d7acd751935a31349115" Nov 11 13:45:03 crc kubenswrapper[4842]: I1111 13:45:03.868379 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m" Nov 11 13:46:14 crc kubenswrapper[4842]: I1111 13:46:14.960996 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:46:14 crc kubenswrapper[4842]: I1111 13:46:14.961542 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:46:44 crc kubenswrapper[4842]: I1111 13:46:44.962263 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:46:44 crc kubenswrapper[4842]: I1111 13:46:44.963404 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:47:14 crc kubenswrapper[4842]: I1111 13:47:14.961380 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:47:14 crc kubenswrapper[4842]: I1111 
13:47:14.962018 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:47:14 crc kubenswrapper[4842]: I1111 13:47:14.962067 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:47:14 crc kubenswrapper[4842]: I1111 13:47:14.962723 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"aa452f419e27a8505361953da4ee9d75039922c9b9553cf1ba73a84a5e1125e8"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 13:47:14 crc kubenswrapper[4842]: I1111 13:47:14.962785 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://aa452f419e27a8505361953da4ee9d75039922c9b9553cf1ba73a84a5e1125e8" gracePeriod=600 Nov 11 13:47:15 crc kubenswrapper[4842]: I1111 13:47:15.611543 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="aa452f419e27a8505361953da4ee9d75039922c9b9553cf1ba73a84a5e1125e8" exitCode=0 Nov 11 13:47:15 crc kubenswrapper[4842]: I1111 13:47:15.611604 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"aa452f419e27a8505361953da4ee9d75039922c9b9553cf1ba73a84a5e1125e8"} Nov 11 13:47:15 crc kubenswrapper[4842]: I1111 13:47:15.612013 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"1f1e7514c76335ccdcd4dfabef70e43a6a62ffa8315747869ab943efa0eff321"} Nov 11 13:47:15 crc kubenswrapper[4842]: I1111 13:47:15.612051 4842 scope.go:117] "RemoveContainer" containerID="bb8a87fd6e5dfffa2ad7daeb56800fbec36b7bea68de40f21af5fdc7ae975192" Nov 11 13:48:50 crc kubenswrapper[4842]: I1111 13:48:50.445868 4842 scope.go:117] "RemoveContainer" containerID="890ac142175b05c2bfd3007ba3a54f8855be5fadfd7beaf1a01dbfbb1bc0fdea" Nov 11 13:48:50 crc kubenswrapper[4842]: I1111 13:48:50.465262 4842 scope.go:117] "RemoveContainer" containerID="4b2f89b510d786ea2a69bcb95f8178578798142b97355f3efcbdee146ab8b745" Nov 11 13:48:50 crc kubenswrapper[4842]: I1111 13:48:50.483639 4842 scope.go:117] "RemoveContainer" containerID="e4536dc21585100d43fc891985e93abf63345f57e8dddcb2f1a4bda2dfb93214" Nov 11 13:49:44 crc kubenswrapper[4842]: I1111 13:49:44.961225 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:49:44 crc kubenswrapper[4842]: I1111 13:49:44.961631 4842 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:50:14 crc kubenswrapper[4842]: I1111 13:50:14.961320 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:50:14 crc kubenswrapper[4842]: I1111 13:50:14.961964 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:50:44 crc kubenswrapper[4842]: I1111 13:50:44.961511 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:50:44 crc kubenswrapper[4842]: I1111 13:50:44.962027 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:50:44 crc kubenswrapper[4842]: I1111 13:50:44.962074 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:50:44 crc kubenswrapper[4842]: I1111 13:50:44.962681 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1f1e7514c76335ccdcd4dfabef70e43a6a62ffa8315747869ab943efa0eff321"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 13:50:44 crc kubenswrapper[4842]: I1111 13:50:44.962733 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://1f1e7514c76335ccdcd4dfabef70e43a6a62ffa8315747869ab943efa0eff321" gracePeriod=600 Nov 11 13:50:45 crc kubenswrapper[4842]: I1111 13:50:45.732431 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="1f1e7514c76335ccdcd4dfabef70e43a6a62ffa8315747869ab943efa0eff321" exitCode=0 Nov 11 13:50:45 crc kubenswrapper[4842]: I1111 13:50:45.732491 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"1f1e7514c76335ccdcd4dfabef70e43a6a62ffa8315747869ab943efa0eff321"} Nov 11 13:50:45 crc kubenswrapper[4842]: I1111 13:50:45.732959 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"3cf43482baec1eed99bfa20a2dd7fb680f42c7e2fc6a6ff6e8095af8a95ac4ec"} Nov 11 13:50:45 crc kubenswrapper[4842]: I1111 13:50:45.732977 4842 scope.go:117] "RemoveContainer" containerID="aa452f419e27a8505361953da4ee9d75039922c9b9553cf1ba73a84a5e1125e8" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.527032 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89"] Nov 11 13:51:35 crc kubenswrapper[4842]: E1111 13:51:35.527787 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20224b1f-f299-41c4-93b0-bea090f9c3cb" containerName="collect-profiles" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.527801 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="20224b1f-f299-41c4-93b0-bea090f9c3cb" containerName="collect-profiles" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.527936 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="20224b1f-f299-41c4-93b0-bea090f9c3cb" containerName="collect-profiles" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.528936 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.531830 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.546283 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89"] Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.632819 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-util\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.632900 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffr6m\" (UniqueName: \"kubernetes.io/projected/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-kube-api-access-ffr6m\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.632986 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-bundle\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.734494 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-bundle\") pod 
\"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.734613 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-util\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.734664 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffr6m\" (UniqueName: \"kubernetes.io/projected/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-kube-api-access-ffr6m\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.735051 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-bundle\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.735086 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-util\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.756826 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffr6m\" (UniqueName: \"kubernetes.io/projected/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-kube-api-access-ffr6m\") pod \"7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:35 crc kubenswrapper[4842]: I1111 13:51:35.844906 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:36 crc kubenswrapper[4842]: I1111 13:51:36.239942 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89"] Nov 11 13:51:36 crc kubenswrapper[4842]: I1111 13:51:36.993445 4842 generic.go:334] "Generic (PLEG): container finished" podID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerID="6f73ca438c776b28cd814f76aa32c1c6cb32c21b610c9bb8beec7c4e11923a62" exitCode=0 Nov 11 13:51:36 crc kubenswrapper[4842]: I1111 13:51:36.993494 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" event={"ID":"ce76dfe0-34fe-4ff7-8a17-d99f59a16522","Type":"ContainerDied","Data":"6f73ca438c776b28cd814f76aa32c1c6cb32c21b610c9bb8beec7c4e11923a62"} Nov 11 13:51:36 crc kubenswrapper[4842]: I1111 13:51:36.993707 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" event={"ID":"ce76dfe0-34fe-4ff7-8a17-d99f59a16522","Type":"ContainerStarted","Data":"ff3b3b32d33c58e588d839d96576c7abb0b22c395835d6ffbfb9aa2e38ddf035"} Nov 11 13:51:36 crc kubenswrapper[4842]: I1111 13:51:36.994855 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 13:51:39 crc kubenswrapper[4842]: I1111 13:51:39.004142 4842 generic.go:334] "Generic (PLEG): container finished" podID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerID="8229fc1f36042c07bda26cdd60e4a51d2c7c57786e1f2d2dea29815b10c0cd9b" exitCode=0 Nov 11 13:51:39 crc kubenswrapper[4842]: I1111 13:51:39.004241 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" event={"ID":"ce76dfe0-34fe-4ff7-8a17-d99f59a16522","Type":"ContainerDied","Data":"8229fc1f36042c07bda26cdd60e4a51d2c7c57786e1f2d2dea29815b10c0cd9b"} Nov 11 13:51:40 crc kubenswrapper[4842]: I1111 13:51:40.011430 4842 generic.go:334] "Generic (PLEG): container finished" podID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerID="94ab6ec240348ca564f0095f6d30c829a5f368029e9ddc4c61076d6c57bd1fca" exitCode=0 Nov 11 13:51:40 crc kubenswrapper[4842]: I1111 13:51:40.011472 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" event={"ID":"ce76dfe0-34fe-4ff7-8a17-d99f59a16522","Type":"ContainerDied","Data":"94ab6ec240348ca564f0095f6d30c829a5f368029e9ddc4c61076d6c57bd1fca"} Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.247132 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.408058 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-bundle\") pod \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.408122 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffr6m\" (UniqueName: \"kubernetes.io/projected/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-kube-api-access-ffr6m\") pod \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.408146 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-util\") pod \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\" (UID: \"ce76dfe0-34fe-4ff7-8a17-d99f59a16522\") " Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.408786 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-bundle" (OuterVolumeSpecName: "bundle") pod "ce76dfe0-34fe-4ff7-8a17-d99f59a16522" (UID: "ce76dfe0-34fe-4ff7-8a17-d99f59a16522"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.415433 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-kube-api-access-ffr6m" (OuterVolumeSpecName: "kube-api-access-ffr6m") pod "ce76dfe0-34fe-4ff7-8a17-d99f59a16522" (UID: "ce76dfe0-34fe-4ff7-8a17-d99f59a16522"). InnerVolumeSpecName "kube-api-access-ffr6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.422263 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-util" (OuterVolumeSpecName: "util") pod "ce76dfe0-34fe-4ff7-8a17-d99f59a16522" (UID: "ce76dfe0-34fe-4ff7-8a17-d99f59a16522"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.509226 4842 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.509975 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffr6m\" (UniqueName: \"kubernetes.io/projected/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-kube-api-access-ffr6m\") on node \"crc\" DevicePath \"\"" Nov 11 13:51:41 crc kubenswrapper[4842]: I1111 13:51:41.510028 4842 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ce76dfe0-34fe-4ff7-8a17-d99f59a16522-util\") on node \"crc\" DevicePath \"\"" Nov 11 13:51:42 crc kubenswrapper[4842]: I1111 13:51:42.027014 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" event={"ID":"ce76dfe0-34fe-4ff7-8a17-d99f59a16522","Type":"ContainerDied","Data":"ff3b3b32d33c58e588d839d96576c7abb0b22c395835d6ffbfb9aa2e38ddf035"} Nov 11 13:51:42 crc kubenswrapper[4842]: I1111 13:51:42.027417 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff3b3b32d33c58e588d839d96576c7abb0b22c395835d6ffbfb9aa2e38ddf035" Nov 11 13:51:42 crc kubenswrapper[4842]: I1111 13:51:42.027061 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.191262 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-65474b4696-mbppg"] Nov 11 13:51:43 crc kubenswrapper[4842]: E1111 13:51:43.191533 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerName="extract" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.191550 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerName="extract" Nov 11 13:51:43 crc kubenswrapper[4842]: E1111 13:51:43.191572 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerName="util" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.191580 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerName="util" Nov 11 13:51:43 crc kubenswrapper[4842]: E1111 13:51:43.191591 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerName="pull" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.191599 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerName="pull" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.191708 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce76dfe0-34fe-4ff7-8a17-d99f59a16522" containerName="extract" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.192215 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.194140 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-nv7dx" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.194204 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.195210 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.205835 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-65474b4696-mbppg"] Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.329725 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjzzn\" (UniqueName: \"kubernetes.io/projected/473113a5-da7d-4f9f-b6c9-865ed25b03fe-kube-api-access-wjzzn\") pod \"nmstate-operator-65474b4696-mbppg\" (UID: \"473113a5-da7d-4f9f-b6c9-865ed25b03fe\") " pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.430469 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjzzn\" (UniqueName: \"kubernetes.io/projected/473113a5-da7d-4f9f-b6c9-865ed25b03fe-kube-api-access-wjzzn\") pod \"nmstate-operator-65474b4696-mbppg\" (UID: \"473113a5-da7d-4f9f-b6c9-865ed25b03fe\") " pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.446607 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjzzn\" (UniqueName: \"kubernetes.io/projected/473113a5-da7d-4f9f-b6c9-865ed25b03fe-kube-api-access-wjzzn\") pod \"nmstate-operator-65474b4696-mbppg\" (UID: \"473113a5-da7d-4f9f-b6c9-865ed25b03fe\") " pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.506353 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" Nov 11 13:51:43 crc kubenswrapper[4842]: I1111 13:51:43.899123 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-65474b4696-mbppg"] Nov 11 13:51:43 crc kubenswrapper[4842]: W1111 13:51:43.904755 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod473113a5_da7d_4f9f_b6c9_865ed25b03fe.slice/crio-dad2fa7718bbf7105ede7c84535ad5a69118aeac6c5a540b4f93d3098e579096 WatchSource:0}: Error finding container dad2fa7718bbf7105ede7c84535ad5a69118aeac6c5a540b4f93d3098e579096: Status 404 returned error can't find the container with id dad2fa7718bbf7105ede7c84535ad5a69118aeac6c5a540b4f93d3098e579096 Nov 11 13:51:44 crc kubenswrapper[4842]: I1111 13:51:44.046645 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" event={"ID":"473113a5-da7d-4f9f-b6c9-865ed25b03fe","Type":"ContainerStarted","Data":"dad2fa7718bbf7105ede7c84535ad5a69118aeac6c5a540b4f93d3098e579096"} Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.068070 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" event={"ID":"473113a5-da7d-4f9f-b6c9-865ed25b03fe","Type":"ContainerStarted","Data":"198469ff29988ab9b79aa7a81b74e56c5717676f6be32b4aac2b5faf2ba3a831"} Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.081182 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-65474b4696-mbppg" podStartSLOduration=1.047424298 podStartE2EDuration="3.08116498s" podCreationTimestamp="2025-11-11 13:51:43 +0000 UTC" firstStartedPulling="2025-11-11 13:51:43.906699375 +0000 UTC m=+1314.566988994" lastFinishedPulling="2025-11-11 13:51:45.940440057 +0000 UTC m=+1316.600729676" observedRunningTime="2025-11-11 13:51:46.080380515 +0000 UTC m=+1316.740670124" watchObservedRunningTime="2025-11-11 13:51:46.08116498 +0000 UTC m=+1316.741454599" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.937743 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns"] Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.939139 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.941828 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j"] Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.942697 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.943771 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-2sfzb" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.944847 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.960789 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j"] Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.964790 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns"] Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.976179 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-4rnrz"] Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.977185 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.990625 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg2f9\" (UniqueName: \"kubernetes.io/projected/836465ab-91a1-4433-9182-be504f2d4b33-kube-api-access-rg2f9\") pod \"nmstate-metrics-5dcf9c57c5-z9xns\" (UID: \"836465ab-91a1-4433-9182-be504f2d4b33\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.990848 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/34787e6f-0d9b-41f6-8cc8-682249a243a2-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzn5j\" (UID: \"34787e6f-0d9b-41f6-8cc8-682249a243a2\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:46 crc kubenswrapper[4842]: I1111 13:51:46.990951 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7s8j8\" (UniqueName: \"kubernetes.io/projected/34787e6f-0d9b-41f6-8cc8-682249a243a2-kube-api-access-7s8j8\") pod \"nmstate-webhook-6b89b748d8-jzn5j\" (UID: \"34787e6f-0d9b-41f6-8cc8-682249a243a2\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.061514 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc"] Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.062157 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.064122 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-xqbgs" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.064896 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.066244 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.075112 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc"] Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.092642 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvpgk\" (UniqueName: \"kubernetes.io/projected/d3481847-dbe0-4b95-ba37-92efb99cbc58-kube-api-access-qvpgk\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.092890 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3481847-dbe0-4b95-ba37-92efb99cbc58-plugin-serving-cert\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093011 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-ovs-socket\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093152 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fldwh\" (UniqueName: \"kubernetes.io/projected/d3a048b3-b011-4646-a47f-c51fa6177169-kube-api-access-fldwh\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093258 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-nmstate-lock\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093384 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/34787e6f-0d9b-41f6-8cc8-682249a243a2-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzn5j\" (UID: \"34787e6f-0d9b-41f6-8cc8-682249a243a2\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093483 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7s8j8\" (UniqueName: 
\"kubernetes.io/projected/34787e6f-0d9b-41f6-8cc8-682249a243a2-kube-api-access-7s8j8\") pod \"nmstate-webhook-6b89b748d8-jzn5j\" (UID: \"34787e6f-0d9b-41f6-8cc8-682249a243a2\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093594 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d3481847-dbe0-4b95-ba37-92efb99cbc58-nginx-conf\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093701 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg2f9\" (UniqueName: \"kubernetes.io/projected/836465ab-91a1-4433-9182-be504f2d4b33-kube-api-access-rg2f9\") pod \"nmstate-metrics-5dcf9c57c5-z9xns\" (UID: \"836465ab-91a1-4433-9182-be504f2d4b33\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.093813 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-dbus-socket\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: E1111 13:51:47.093515 4842 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 11 13:51:47 crc kubenswrapper[4842]: E1111 13:51:47.094052 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/34787e6f-0d9b-41f6-8cc8-682249a243a2-tls-key-pair podName:34787e6f-0d9b-41f6-8cc8-682249a243a2 nodeName:}" failed. No retries permitted until 2025-11-11 13:51:47.594028504 +0000 UTC m=+1318.254318203 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/34787e6f-0d9b-41f6-8cc8-682249a243a2-tls-key-pair") pod "nmstate-webhook-6b89b748d8-jzn5j" (UID: "34787e6f-0d9b-41f6-8cc8-682249a243a2") : secret "openshift-nmstate-webhook" not found Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.113026 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg2f9\" (UniqueName: \"kubernetes.io/projected/836465ab-91a1-4433-9182-be504f2d4b33-kube-api-access-rg2f9\") pod \"nmstate-metrics-5dcf9c57c5-z9xns\" (UID: \"836465ab-91a1-4433-9182-be504f2d4b33\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.113755 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7s8j8\" (UniqueName: \"kubernetes.io/projected/34787e6f-0d9b-41f6-8cc8-682249a243a2-kube-api-access-7s8j8\") pod \"nmstate-webhook-6b89b748d8-jzn5j\" (UID: \"34787e6f-0d9b-41f6-8cc8-682249a243a2\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.197345 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvpgk\" (UniqueName: \"kubernetes.io/projected/d3481847-dbe0-4b95-ba37-92efb99cbc58-kube-api-access-qvpgk\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.197404 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-ovs-socket\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.197429 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3481847-dbe0-4b95-ba37-92efb99cbc58-plugin-serving-cert\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.197453 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fldwh\" (UniqueName: \"kubernetes.io/projected/d3a048b3-b011-4646-a47f-c51fa6177169-kube-api-access-fldwh\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.197480 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-nmstate-lock\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.197528 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d3481847-dbe0-4b95-ba37-92efb99cbc58-nginx-conf\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc 
kubenswrapper[4842]: I1111 13:51:47.197562 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-dbus-socket\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.197875 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-dbus-socket\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.198236 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-nmstate-lock\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.198502 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d3a048b3-b011-4646-a47f-c51fa6177169-ovs-socket\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.198966 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d3481847-dbe0-4b95-ba37-92efb99cbc58-nginx-conf\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.209884 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3481847-dbe0-4b95-ba37-92efb99cbc58-plugin-serving-cert\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.217115 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fldwh\" (UniqueName: \"kubernetes.io/projected/d3a048b3-b011-4646-a47f-c51fa6177169-kube-api-access-fldwh\") pod \"nmstate-handler-4rnrz\" (UID: \"d3a048b3-b011-4646-a47f-c51fa6177169\") " pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.227620 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvpgk\" (UniqueName: \"kubernetes.io/projected/d3481847-dbe0-4b95-ba37-92efb99cbc58-kube-api-access-qvpgk\") pod \"nmstate-console-plugin-5859445d84-62bzc\" (UID: \"d3481847-dbe0-4b95-ba37-92efb99cbc58\") " pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.257466 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.281017 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7fc599dd69-qz74w"] Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.282842 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.297366 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.298417 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn8wt\" (UniqueName: \"kubernetes.io/projected/1996350d-9eed-4748-bf0f-08858066b0ba-kube-api-access-sn8wt\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.298453 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-oauth-serving-cert\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.298500 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1996350d-9eed-4748-bf0f-08858066b0ba-console-oauth-config\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.298519 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-console-config\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.298542 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-service-ca\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.298593 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1996350d-9eed-4748-bf0f-08858066b0ba-console-serving-cert\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.298648 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-trusted-ca-bundle\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: 
I1111 13:51:47.300897 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7fc599dd69-qz74w"] Nov 11 13:51:47 crc kubenswrapper[4842]: W1111 13:51:47.334500 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3a048b3_b011_4646_a47f_c51fa6177169.slice/crio-49add33c3f787fa3732bf1317d70303715d6107724252025aee0d883de916a62 WatchSource:0}: Error finding container 49add33c3f787fa3732bf1317d70303715d6107724252025aee0d883de916a62: Status 404 returned error can't find the container with id 49add33c3f787fa3732bf1317d70303715d6107724252025aee0d883de916a62 Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.379725 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.400294 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1996350d-9eed-4748-bf0f-08858066b0ba-console-serving-cert\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.400362 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-trusted-ca-bundle\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.400387 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn8wt\" (UniqueName: \"kubernetes.io/projected/1996350d-9eed-4748-bf0f-08858066b0ba-kube-api-access-sn8wt\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.400403 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-oauth-serving-cert\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.400441 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1996350d-9eed-4748-bf0f-08858066b0ba-console-oauth-config\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.400457 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-console-config\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.400654 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-service-ca\") pod \"console-7fc599dd69-qz74w\" (UID: 
\"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.401432 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-console-config\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.401762 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-oauth-serving-cert\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.402435 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-service-ca\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.403059 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1996350d-9eed-4748-bf0f-08858066b0ba-trusted-ca-bundle\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.403580 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1996350d-9eed-4748-bf0f-08858066b0ba-console-serving-cert\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.405880 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1996350d-9eed-4748-bf0f-08858066b0ba-console-oauth-config\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.417042 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn8wt\" (UniqueName: \"kubernetes.io/projected/1996350d-9eed-4748-bf0f-08858066b0ba-kube-api-access-sn8wt\") pod \"console-7fc599dd69-qz74w\" (UID: \"1996350d-9eed-4748-bf0f-08858066b0ba\") " pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.562120 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc"] Nov 11 13:51:47 crc kubenswrapper[4842]: W1111 13:51:47.567959 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3481847_dbe0_4b95_ba37_92efb99cbc58.slice/crio-206d6c11d1e72c1e5fe0f1d5752878008f273a49de62e117ea38e644ea362493 WatchSource:0}: Error finding container 206d6c11d1e72c1e5fe0f1d5752878008f273a49de62e117ea38e644ea362493: Status 404 returned error can't find the container with id 206d6c11d1e72c1e5fe0f1d5752878008f273a49de62e117ea38e644ea362493 Nov 11 13:51:47 crc 
kubenswrapper[4842]: I1111 13:51:47.604552 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/34787e6f-0d9b-41f6-8cc8-682249a243a2-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzn5j\" (UID: \"34787e6f-0d9b-41f6-8cc8-682249a243a2\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.608977 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/34787e6f-0d9b-41f6-8cc8-682249a243a2-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzn5j\" (UID: \"34787e6f-0d9b-41f6-8cc8-682249a243a2\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.627145 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.690552 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns"] Nov 11 13:51:47 crc kubenswrapper[4842]: W1111 13:51:47.692483 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod836465ab_91a1_4433_9182_be504f2d4b33.slice/crio-f1a82e5be3136038b1003340309b4b535df1361597920873db4bfecba7d1688d WatchSource:0}: Error finding container f1a82e5be3136038b1003340309b4b535df1361597920873db4bfecba7d1688d: Status 404 returned error can't find the container with id f1a82e5be3136038b1003340309b4b535df1361597920873db4bfecba7d1688d Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.855698 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7fc599dd69-qz74w"] Nov 11 13:51:47 crc kubenswrapper[4842]: W1111 13:51:47.861618 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1996350d_9eed_4748_bf0f_08858066b0ba.slice/crio-bc0a255276d799717d9f395d8979ac1702bcde431d46f612f71ff4ca5d359d44 WatchSource:0}: Error finding container bc0a255276d799717d9f395d8979ac1702bcde431d46f612f71ff4ca5d359d44: Status 404 returned error can't find the container with id bc0a255276d799717d9f395d8979ac1702bcde431d46f612f71ff4ca5d359d44 Nov 11 13:51:47 crc kubenswrapper[4842]: I1111 13:51:47.866610 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.065148 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j"] Nov 11 13:51:48 crc kubenswrapper[4842]: W1111 13:51:48.067255 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34787e6f_0d9b_41f6_8cc8_682249a243a2.slice/crio-a1358f7dc90a4db1c259e49e3ee0796c0e14f930682ef35005d551374978e47c WatchSource:0}: Error finding container a1358f7dc90a4db1c259e49e3ee0796c0e14f930682ef35005d551374978e47c: Status 404 returned error can't find the container with id a1358f7dc90a4db1c259e49e3ee0796c0e14f930682ef35005d551374978e47c Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.076805 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-4rnrz" event={"ID":"d3a048b3-b011-4646-a47f-c51fa6177169","Type":"ContainerStarted","Data":"49add33c3f787fa3732bf1317d70303715d6107724252025aee0d883de916a62"} Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.078223 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" event={"ID":"836465ab-91a1-4433-9182-be504f2d4b33","Type":"ContainerStarted","Data":"f1a82e5be3136038b1003340309b4b535df1361597920873db4bfecba7d1688d"} Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.079454 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" event={"ID":"34787e6f-0d9b-41f6-8cc8-682249a243a2","Type":"ContainerStarted","Data":"a1358f7dc90a4db1c259e49e3ee0796c0e14f930682ef35005d551374978e47c"} Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.080968 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7fc599dd69-qz74w" event={"ID":"1996350d-9eed-4748-bf0f-08858066b0ba","Type":"ContainerStarted","Data":"bf2925bc038d9ef436240adbaeb6e6e11b7eaa53ccd146d043e41a648a925d2e"} Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.081016 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7fc599dd69-qz74w" event={"ID":"1996350d-9eed-4748-bf0f-08858066b0ba","Type":"ContainerStarted","Data":"bc0a255276d799717d9f395d8979ac1702bcde431d46f612f71ff4ca5d359d44"} Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.082124 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" event={"ID":"d3481847-dbe0-4b95-ba37-92efb99cbc58","Type":"ContainerStarted","Data":"206d6c11d1e72c1e5fe0f1d5752878008f273a49de62e117ea38e644ea362493"} Nov 11 13:51:48 crc kubenswrapper[4842]: I1111 13:51:48.101333 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7fc599dd69-qz74w" podStartSLOduration=1.101314481 podStartE2EDuration="1.101314481s" podCreationTimestamp="2025-11-11 13:51:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:51:48.099996479 +0000 UTC m=+1318.760286118" watchObservedRunningTime="2025-11-11 13:51:48.101314481 +0000 UTC m=+1318.761604100" Nov 11 13:51:51 crc kubenswrapper[4842]: I1111 13:51:51.113878 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" 
event={"ID":"d3481847-dbe0-4b95-ba37-92efb99cbc58","Type":"ContainerStarted","Data":"6fbf0b752e995d9d01a7f2f9daca1e6260fc6865042b4e80a530dfef3cc66a52"} Nov 11 13:51:51 crc kubenswrapper[4842]: I1111 13:51:51.118224 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" event={"ID":"836465ab-91a1-4433-9182-be504f2d4b33","Type":"ContainerStarted","Data":"659c38c14f1c862bf67bcecac5b5427c96975d42c4a8b028bae4ade9482e73cc"} Nov 11 13:51:51 crc kubenswrapper[4842]: I1111 13:51:51.125328 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" event={"ID":"34787e6f-0d9b-41f6-8cc8-682249a243a2","Type":"ContainerStarted","Data":"43a61254390d06c8a8be35fe8c025d33f888e9dbc31ecb366dde0071c085a52d"} Nov 11 13:51:51 crc kubenswrapper[4842]: I1111 13:51:51.130246 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5859445d84-62bzc" podStartSLOduration=1.627871477 podStartE2EDuration="4.130230575s" podCreationTimestamp="2025-11-11 13:51:47 +0000 UTC" firstStartedPulling="2025-11-11 13:51:47.57022724 +0000 UTC m=+1318.230516859" lastFinishedPulling="2025-11-11 13:51:50.072586338 +0000 UTC m=+1320.732875957" observedRunningTime="2025-11-11 13:51:51.128056456 +0000 UTC m=+1321.788346085" watchObservedRunningTime="2025-11-11 13:51:51.130230575 +0000 UTC m=+1321.790520194" Nov 11 13:51:51 crc kubenswrapper[4842]: I1111 13:51:51.150353 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" podStartSLOduration=2.231450696 podStartE2EDuration="5.150333544s" podCreationTimestamp="2025-11-11 13:51:46 +0000 UTC" firstStartedPulling="2025-11-11 13:51:48.06890242 +0000 UTC m=+1318.729192039" lastFinishedPulling="2025-11-11 13:51:50.987785268 +0000 UTC m=+1321.648074887" observedRunningTime="2025-11-11 13:51:51.148225666 +0000 UTC m=+1321.808515305" watchObservedRunningTime="2025-11-11 13:51:51.150333544 +0000 UTC m=+1321.810623163" Nov 11 13:51:52 crc kubenswrapper[4842]: I1111 13:51:52.132771 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-4rnrz" event={"ID":"d3a048b3-b011-4646-a47f-c51fa6177169","Type":"ContainerStarted","Data":"b8c5b68582be946a4ba1a0116e92d8c80de06fbd213b8f2ffc7fb3a9feec4bb6"} Nov 11 13:51:52 crc kubenswrapper[4842]: I1111 13:51:52.133044 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:51:52 crc kubenswrapper[4842]: I1111 13:51:52.149660 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-4rnrz" podStartSLOduration=2.482244998 podStartE2EDuration="6.149640597s" podCreationTimestamp="2025-11-11 13:51:46 +0000 UTC" firstStartedPulling="2025-11-11 13:51:47.336506921 +0000 UTC m=+1317.996796540" lastFinishedPulling="2025-11-11 13:51:51.00390252 +0000 UTC m=+1321.664192139" observedRunningTime="2025-11-11 13:51:52.146452016 +0000 UTC m=+1322.806741635" watchObservedRunningTime="2025-11-11 13:51:52.149640597 +0000 UTC m=+1322.809930216" Nov 11 13:51:52 crc kubenswrapper[4842]: I1111 13:51:52.298835 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:54 crc kubenswrapper[4842]: I1111 13:51:54.144441 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" event={"ID":"836465ab-91a1-4433-9182-be504f2d4b33","Type":"ContainerStarted","Data":"9008c485211a93795040378f63b9ca8fe0e45de09a290085f2b23e50de0a9be3"} Nov 11 13:51:54 crc kubenswrapper[4842]: I1111 13:51:54.164015 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z9xns" podStartSLOduration=2.804045807 podStartE2EDuration="8.163990623s" podCreationTimestamp="2025-11-11 13:51:46 +0000 UTC" firstStartedPulling="2025-11-11 13:51:47.695793971 +0000 UTC m=+1318.356083590" lastFinishedPulling="2025-11-11 13:51:53.055738787 +0000 UTC m=+1323.716028406" observedRunningTime="2025-11-11 13:51:54.159633065 +0000 UTC m=+1324.819922684" watchObservedRunningTime="2025-11-11 13:51:54.163990623 +0000 UTC m=+1324.824280252" Nov 11 13:51:57 crc kubenswrapper[4842]: I1111 13:51:57.317759 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-4rnrz" Nov 11 13:51:57 crc kubenswrapper[4842]: I1111 13:51:57.628007 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:57 crc kubenswrapper[4842]: I1111 13:51:57.628069 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:57 crc kubenswrapper[4842]: I1111 13:51:57.632957 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:58 crc kubenswrapper[4842]: I1111 13:51:58.176486 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7fc599dd69-qz74w" Nov 11 13:51:58 crc kubenswrapper[4842]: I1111 13:51:58.222288 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-lwbtb"] Nov 11 13:52:07 crc kubenswrapper[4842]: I1111 13:52:07.880560 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzn5j" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.718457 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l"] Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.720542 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.722574 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.730145 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l"] Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.768502 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-util\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.768591 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzsp8\" (UniqueName: \"kubernetes.io/projected/73f91574-d20e-4a67-98a6-ba1841b5e35f-kube-api-access-lzsp8\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.768641 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-bundle\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.870051 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-util\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.870173 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzsp8\" (UniqueName: \"kubernetes.io/projected/73f91574-d20e-4a67-98a6-ba1841b5e35f-kube-api-access-lzsp8\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.870208 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-bundle\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.870633 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-util\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.870645 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-bundle\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:21 crc kubenswrapper[4842]: I1111 13:52:21.891972 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzsp8\" (UniqueName: \"kubernetes.io/projected/73f91574-d20e-4a67-98a6-ba1841b5e35f-kube-api-access-lzsp8\") pod \"7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:22 crc kubenswrapper[4842]: I1111 13:52:22.038839 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:22 crc kubenswrapper[4842]: I1111 13:52:22.259031 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l"] Nov 11 13:52:22 crc kubenswrapper[4842]: I1111 13:52:22.315604 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" event={"ID":"73f91574-d20e-4a67-98a6-ba1841b5e35f","Type":"ContainerStarted","Data":"bbe1d8ebaf9af68ec06e1bbe590b49c4825bcb58fb2ac461da7c4268f57b5498"} Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.265677 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-lwbtb" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" containerName="console" containerID="cri-o://360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe" gracePeriod=15 Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.321575 4842 generic.go:334] "Generic (PLEG): container finished" podID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerID="844574c872eaca993c83bdcdddf439a8699c739b4009636bc96112d468ac8e77" exitCode=0 Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.321625 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" event={"ID":"73f91574-d20e-4a67-98a6-ba1841b5e35f","Type":"ContainerDied","Data":"844574c872eaca993c83bdcdddf439a8699c739b4009636bc96112d468ac8e77"} Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.657268 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-lwbtb_cc0a9e05-e827-4489-97df-473c19eb2732/console/0.log" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.657554 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.799007 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-trusted-ca-bundle\") pod \"cc0a9e05-e827-4489-97df-473c19eb2732\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.799082 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-service-ca\") pod \"cc0a9e05-e827-4489-97df-473c19eb2732\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.799220 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-console-config\") pod \"cc0a9e05-e827-4489-97df-473c19eb2732\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.799245 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-oauth-config\") pod \"cc0a9e05-e827-4489-97df-473c19eb2732\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.799276 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-oauth-serving-cert\") pod \"cc0a9e05-e827-4489-97df-473c19eb2732\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.799307 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlqpc\" (UniqueName: \"kubernetes.io/projected/cc0a9e05-e827-4489-97df-473c19eb2732-kube-api-access-dlqpc\") pod \"cc0a9e05-e827-4489-97df-473c19eb2732\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.799342 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-serving-cert\") pod \"cc0a9e05-e827-4489-97df-473c19eb2732\" (UID: \"cc0a9e05-e827-4489-97df-473c19eb2732\") " Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.800275 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-console-config" (OuterVolumeSpecName: "console-config") pod "cc0a9e05-e827-4489-97df-473c19eb2732" (UID: "cc0a9e05-e827-4489-97df-473c19eb2732"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.800676 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "cc0a9e05-e827-4489-97df-473c19eb2732" (UID: "cc0a9e05-e827-4489-97df-473c19eb2732"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.800811 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "cc0a9e05-e827-4489-97df-473c19eb2732" (UID: "cc0a9e05-e827-4489-97df-473c19eb2732"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.800988 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-service-ca" (OuterVolumeSpecName: "service-ca") pod "cc0a9e05-e827-4489-97df-473c19eb2732" (UID: "cc0a9e05-e827-4489-97df-473c19eb2732"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.806285 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "cc0a9e05-e827-4489-97df-473c19eb2732" (UID: "cc0a9e05-e827-4489-97df-473c19eb2732"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.807023 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc0a9e05-e827-4489-97df-473c19eb2732-kube-api-access-dlqpc" (OuterVolumeSpecName: "kube-api-access-dlqpc") pod "cc0a9e05-e827-4489-97df-473c19eb2732" (UID: "cc0a9e05-e827-4489-97df-473c19eb2732"). InnerVolumeSpecName "kube-api-access-dlqpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.807279 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "cc0a9e05-e827-4489-97df-473c19eb2732" (UID: "cc0a9e05-e827-4489-97df-473c19eb2732"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.900756 4842 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-console-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.900794 4842 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.900804 4842 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.900812 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlqpc\" (UniqueName: \"kubernetes.io/projected/cc0a9e05-e827-4489-97df-473c19eb2732-kube-api-access-dlqpc\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.900821 4842 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/cc0a9e05-e827-4489-97df-473c19eb2732-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.900829 4842 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:23 crc kubenswrapper[4842]: I1111 13:52:23.900837 4842 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/cc0a9e05-e827-4489-97df-473c19eb2732-service-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.329724 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-lwbtb_cc0a9e05-e827-4489-97df-473c19eb2732/console/0.log" Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.329795 4842 generic.go:334] "Generic (PLEG): container finished" podID="cc0a9e05-e827-4489-97df-473c19eb2732" containerID="360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe" exitCode=2 Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.329852 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lwbtb" event={"ID":"cc0a9e05-e827-4489-97df-473c19eb2732","Type":"ContainerDied","Data":"360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe"} Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.329889 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lwbtb" event={"ID":"cc0a9e05-e827-4489-97df-473c19eb2732","Type":"ContainerDied","Data":"2fa3d53315dd4e6c8c88553a1a33883aa8c204a1e7a0288d469c8d5d2907c1ad"} Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.329926 4842 scope.go:117] "RemoveContainer" containerID="360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe" Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.329934 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-lwbtb" Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.348488 4842 scope.go:117] "RemoveContainer" containerID="360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe" Nov 11 13:52:24 crc kubenswrapper[4842]: E1111 13:52:24.349067 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe\": container with ID starting with 360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe not found: ID does not exist" containerID="360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe" Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.349140 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe"} err="failed to get container status \"360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe\": rpc error: code = NotFound desc = could not find container \"360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe\": container with ID starting with 360a1362c380c4487865be91176db73e9be540fb65727fa295eb0f456bafeefe not found: ID does not exist" Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.349318 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-lwbtb"] Nov 11 13:52:24 crc kubenswrapper[4842]: I1111 13:52:24.355966 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-lwbtb"] Nov 11 13:52:25 crc kubenswrapper[4842]: I1111 13:52:25.339452 4842 generic.go:334] "Generic (PLEG): container finished" podID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerID="44ff7aa1b064d399bea55a1cd62b529855b60e30787b924e1afab9ddb89ac8b6" exitCode=0 Nov 11 13:52:25 crc kubenswrapper[4842]: I1111 13:52:25.339578 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" event={"ID":"73f91574-d20e-4a67-98a6-ba1841b5e35f","Type":"ContainerDied","Data":"44ff7aa1b064d399bea55a1cd62b529855b60e30787b924e1afab9ddb89ac8b6"} Nov 11 13:52:26 crc kubenswrapper[4842]: I1111 13:52:26.068742 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" path="/var/lib/kubelet/pods/cc0a9e05-e827-4489-97df-473c19eb2732/volumes" Nov 11 13:52:26 crc kubenswrapper[4842]: I1111 13:52:26.351190 4842 generic.go:334] "Generic (PLEG): container finished" podID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerID="8a1b88520b210b15a797fda1e17f9c99fc1f0ac9524a49171104369a71c2e29a" exitCode=0 Nov 11 13:52:26 crc kubenswrapper[4842]: I1111 13:52:26.351265 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" event={"ID":"73f91574-d20e-4a67-98a6-ba1841b5e35f","Type":"ContainerDied","Data":"8a1b88520b210b15a797fda1e17f9c99fc1f0ac9524a49171104369a71c2e29a"} Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.570388 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.749237 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzsp8\" (UniqueName: \"kubernetes.io/projected/73f91574-d20e-4a67-98a6-ba1841b5e35f-kube-api-access-lzsp8\") pod \"73f91574-d20e-4a67-98a6-ba1841b5e35f\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.749291 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-bundle\") pod \"73f91574-d20e-4a67-98a6-ba1841b5e35f\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.749372 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-util\") pod \"73f91574-d20e-4a67-98a6-ba1841b5e35f\" (UID: \"73f91574-d20e-4a67-98a6-ba1841b5e35f\") " Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.750405 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-bundle" (OuterVolumeSpecName: "bundle") pod "73f91574-d20e-4a67-98a6-ba1841b5e35f" (UID: "73f91574-d20e-4a67-98a6-ba1841b5e35f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.754589 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73f91574-d20e-4a67-98a6-ba1841b5e35f-kube-api-access-lzsp8" (OuterVolumeSpecName: "kube-api-access-lzsp8") pod "73f91574-d20e-4a67-98a6-ba1841b5e35f" (UID: "73f91574-d20e-4a67-98a6-ba1841b5e35f"). InnerVolumeSpecName "kube-api-access-lzsp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.850687 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzsp8\" (UniqueName: \"kubernetes.io/projected/73f91574-d20e-4a67-98a6-ba1841b5e35f-kube-api-access-lzsp8\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.851047 4842 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.933640 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-util" (OuterVolumeSpecName: "util") pod "73f91574-d20e-4a67-98a6-ba1841b5e35f" (UID: "73f91574-d20e-4a67-98a6-ba1841b5e35f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:52:27 crc kubenswrapper[4842]: I1111 13:52:27.953009 4842 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/73f91574-d20e-4a67-98a6-ba1841b5e35f-util\") on node \"crc\" DevicePath \"\"" Nov 11 13:52:28 crc kubenswrapper[4842]: I1111 13:52:28.363155 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" event={"ID":"73f91574-d20e-4a67-98a6-ba1841b5e35f","Type":"ContainerDied","Data":"bbe1d8ebaf9af68ec06e1bbe590b49c4825bcb58fb2ac461da7c4268f57b5498"} Nov 11 13:52:28 crc kubenswrapper[4842]: I1111 13:52:28.363205 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbe1d8ebaf9af68ec06e1bbe590b49c4825bcb58fb2ac461da7c4268f57b5498" Nov 11 13:52:28 crc kubenswrapper[4842]: I1111 13:52:28.363451 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.501680 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn"] Nov 11 13:52:37 crc kubenswrapper[4842]: E1111 13:52:37.502392 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerName="pull" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.502404 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerName="pull" Nov 11 13:52:37 crc kubenswrapper[4842]: E1111 13:52:37.502415 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" containerName="console" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.502422 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" containerName="console" Nov 11 13:52:37 crc kubenswrapper[4842]: E1111 13:52:37.502430 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerName="extract" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.502435 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerName="extract" Nov 11 13:52:37 crc kubenswrapper[4842]: E1111 13:52:37.502443 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerName="util" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.502449 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerName="util" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.502548 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc0a9e05-e827-4489-97df-473c19eb2732" containerName="console" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.502566 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="73f91574-d20e-4a67-98a6-ba1841b5e35f" containerName="extract" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.502939 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.504617 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.505213 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.505485 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.505600 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.505799 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-5mvpd" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.525295 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn"] Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.678726 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/58177de2-efee-407d-82ad-b0319114f876-apiservice-cert\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.678804 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frzpf\" (UniqueName: \"kubernetes.io/projected/58177de2-efee-407d-82ad-b0319114f876-kube-api-access-frzpf\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.678863 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/58177de2-efee-407d-82ad-b0319114f876-webhook-cert\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.780295 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/58177de2-efee-407d-82ad-b0319114f876-apiservice-cert\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.780656 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frzpf\" (UniqueName: \"kubernetes.io/projected/58177de2-efee-407d-82ad-b0319114f876-kube-api-access-frzpf\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.780766 
4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/58177de2-efee-407d-82ad-b0319114f876-webhook-cert\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.789903 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/58177de2-efee-407d-82ad-b0319114f876-webhook-cert\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.789962 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/58177de2-efee-407d-82ad-b0319114f876-apiservice-cert\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.799958 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frzpf\" (UniqueName: \"kubernetes.io/projected/58177de2-efee-407d-82ad-b0319114f876-kube-api-access-frzpf\") pod \"metallb-operator-controller-manager-579ffdf495-n54pn\" (UID: \"58177de2-efee-407d-82ad-b0319114f876\") " pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.819703 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.866025 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-8689778684-tflc4"] Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.866871 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.872778 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-4mbng" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.872993 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.873147 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.880842 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8689778684-tflc4"] Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.985386 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pg7n\" (UniqueName: \"kubernetes.io/projected/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-kube-api-access-9pg7n\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.985487 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-apiservice-cert\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:37 crc kubenswrapper[4842]: I1111 13:52:37.985513 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-webhook-cert\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.087241 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pg7n\" (UniqueName: \"kubernetes.io/projected/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-kube-api-access-9pg7n\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.090446 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-apiservice-cert\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.090490 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-webhook-cert\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 
13:52:38.114250 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-webhook-cert\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.114844 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-apiservice-cert\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.119701 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pg7n\" (UniqueName: \"kubernetes.io/projected/f426ee4c-af44-4cc2-b9ae-8d83e3816bba-kube-api-access-9pg7n\") pod \"metallb-operator-webhook-server-8689778684-tflc4\" (UID: \"f426ee4c-af44-4cc2-b9ae-8d83e3816bba\") " pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.219264 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.297063 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn"] Nov 11 13:52:38 crc kubenswrapper[4842]: W1111 13:52:38.310197 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58177de2_efee_407d_82ad_b0319114f876.slice/crio-6e3c7345b959257e81a04e2d8f2bfff2de9581f00284b1130f10293316ae991c WatchSource:0}: Error finding container 6e3c7345b959257e81a04e2d8f2bfff2de9581f00284b1130f10293316ae991c: Status 404 returned error can't find the container with id 6e3c7345b959257e81a04e2d8f2bfff2de9581f00284b1130f10293316ae991c Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.422841 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" event={"ID":"58177de2-efee-407d-82ad-b0319114f876","Type":"ContainerStarted","Data":"6e3c7345b959257e81a04e2d8f2bfff2de9581f00284b1130f10293316ae991c"} Nov 11 13:52:38 crc kubenswrapper[4842]: I1111 13:52:38.447568 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8689778684-tflc4"] Nov 11 13:52:38 crc kubenswrapper[4842]: W1111 13:52:38.455703 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf426ee4c_af44_4cc2_b9ae_8d83e3816bba.slice/crio-6c7b1ea6a2607a44692e2ab68edcc7821d6e5d9ac009fea9fe38b7be9112e031 WatchSource:0}: Error finding container 6c7b1ea6a2607a44692e2ab68edcc7821d6e5d9ac009fea9fe38b7be9112e031: Status 404 returned error can't find the container with id 6c7b1ea6a2607a44692e2ab68edcc7821d6e5d9ac009fea9fe38b7be9112e031 Nov 11 13:52:39 crc kubenswrapper[4842]: I1111 13:52:39.430540 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" 
event={"ID":"f426ee4c-af44-4cc2-b9ae-8d83e3816bba","Type":"ContainerStarted","Data":"6c7b1ea6a2607a44692e2ab68edcc7821d6e5d9ac009fea9fe38b7be9112e031"} Nov 11 13:52:43 crc kubenswrapper[4842]: I1111 13:52:43.459530 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" event={"ID":"58177de2-efee-407d-82ad-b0319114f876","Type":"ContainerStarted","Data":"3b8baf309a3a6f38b022b8979e0ef607b33999d9663c458694accb705b95c5af"} Nov 11 13:52:43 crc kubenswrapper[4842]: I1111 13:52:43.460149 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:52:43 crc kubenswrapper[4842]: I1111 13:52:43.461604 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" event={"ID":"f426ee4c-af44-4cc2-b9ae-8d83e3816bba","Type":"ContainerStarted","Data":"2629db0c092d321301d655a40b40abd7d252cba0d27eb10ce7632e87e374e85e"} Nov 11 13:52:43 crc kubenswrapper[4842]: I1111 13:52:43.461816 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:52:43 crc kubenswrapper[4842]: I1111 13:52:43.487055 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" podStartSLOduration=2.111255925 podStartE2EDuration="6.487030619s" podCreationTimestamp="2025-11-11 13:52:37 +0000 UTC" firstStartedPulling="2025-11-11 13:52:38.312853428 +0000 UTC m=+1368.973143047" lastFinishedPulling="2025-11-11 13:52:42.688628122 +0000 UTC m=+1373.348917741" observedRunningTime="2025-11-11 13:52:43.481838345 +0000 UTC m=+1374.142127964" watchObservedRunningTime="2025-11-11 13:52:43.487030619 +0000 UTC m=+1374.147320259" Nov 11 13:52:43 crc kubenswrapper[4842]: I1111 13:52:43.500900 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" podStartSLOduration=2.238148789 podStartE2EDuration="6.5008837s" podCreationTimestamp="2025-11-11 13:52:37 +0000 UTC" firstStartedPulling="2025-11-11 13:52:38.459263162 +0000 UTC m=+1369.119552781" lastFinishedPulling="2025-11-11 13:52:42.721998073 +0000 UTC m=+1373.382287692" observedRunningTime="2025-11-11 13:52:43.498958329 +0000 UTC m=+1374.159247948" watchObservedRunningTime="2025-11-11 13:52:43.5008837 +0000 UTC m=+1374.161173319" Nov 11 13:52:58 crc kubenswrapper[4842]: I1111 13:52:58.237521 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-8689778684-tflc4" Nov 11 13:53:14 crc kubenswrapper[4842]: I1111 13:53:14.961403 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:53:14 crc kubenswrapper[4842]: I1111 13:53:14.961994 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:53:17 crc kubenswrapper[4842]: I1111 
13:53:17.822680 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-579ffdf495-n54pn" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.509256 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-x8q5h"] Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.511798 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.513696 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-x8k57" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.513709 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.515220 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w"] Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.515922 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.517036 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.518211 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.525854 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w"] Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.615852 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-vjwsf"] Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.617052 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.622834 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.623001 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.623121 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.623611 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-7bchz" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.639601 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-z8m9r"] Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.640600 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.643841 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.650591 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-z8m9r"] Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.691987 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-conf\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.692057 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-metrics\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.692111 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-metrics-certs\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.692140 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-reloader\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.692170 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ea46a4b-b757-47ae-a1b1-d7e82c5980e0-cert\") pod \"frr-k8s-webhook-server-6fbb69bdf8-gf25w\" (UID: \"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0\") " pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.692189 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-sockets\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.692951 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-startup\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.693003 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwd8r\" (UniqueName: \"kubernetes.io/projected/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-kube-api-access-kwd8r\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.693037 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-848wt\" (UniqueName: \"kubernetes.io/projected/9ea46a4b-b757-47ae-a1b1-d7e82c5980e0-kube-api-access-848wt\") pod \"frr-k8s-webhook-server-6fbb69bdf8-gf25w\" (UID: \"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0\") " pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794478 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/12b20cff-a77f-429a-81f4-ec7e34de65e9-cert\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794532 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ea46a4b-b757-47ae-a1b1-d7e82c5980e0-cert\") pod \"frr-k8s-webhook-server-6fbb69bdf8-gf25w\" (UID: \"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0\") " pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794556 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-sockets\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794592 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-metrics-certs\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794614 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-startup\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794650 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794670 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42vx9\" (UniqueName: \"kubernetes.io/projected/12b20cff-a77f-429a-81f4-ec7e34de65e9-kube-api-access-42vx9\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794697 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwd8r\" (UniqueName: \"kubernetes.io/projected/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-kube-api-access-kwd8r\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794722 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/5e9d7317-e2f0-4262-a288-adec1afe4657-metallb-excludel2\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794746 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-848wt\" (UniqueName: \"kubernetes.io/projected/9ea46a4b-b757-47ae-a1b1-d7e82c5980e0-kube-api-access-848wt\") pod \"frr-k8s-webhook-server-6fbb69bdf8-gf25w\" (UID: \"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0\") " pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794771 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-conf\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794795 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12b20cff-a77f-429a-81f4-ec7e34de65e9-metrics-certs\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794814 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgjjj\" (UniqueName: \"kubernetes.io/projected/5e9d7317-e2f0-4262-a288-adec1afe4657-kube-api-access-hgjjj\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794840 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-metrics\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794859 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-metrics-certs\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.794881 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-reloader\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.795348 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-reloader\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.796145 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-sockets\") pod \"frr-k8s-x8q5h\" 
(UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.796638 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-metrics\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.796943 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-conf\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.797058 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-frr-startup\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.801327 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9ea46a4b-b757-47ae-a1b1-d7e82c5980e0-cert\") pod \"frr-k8s-webhook-server-6fbb69bdf8-gf25w\" (UID: \"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0\") " pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.807527 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-metrics-certs\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.812384 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-848wt\" (UniqueName: \"kubernetes.io/projected/9ea46a4b-b757-47ae-a1b1-d7e82c5980e0-kube-api-access-848wt\") pod \"frr-k8s-webhook-server-6fbb69bdf8-gf25w\" (UID: \"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0\") " pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.813764 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwd8r\" (UniqueName: \"kubernetes.io/projected/b09ac28a-d43e-4609-b418-5e7f1d7e22a9-kube-api-access-kwd8r\") pod \"frr-k8s-x8q5h\" (UID: \"b09ac28a-d43e-4609-b418-5e7f1d7e22a9\") " pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.830670 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.838765 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.896555 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/12b20cff-a77f-429a-81f4-ec7e34de65e9-cert\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.896681 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-metrics-certs\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.896733 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.896754 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42vx9\" (UniqueName: \"kubernetes.io/projected/12b20cff-a77f-429a-81f4-ec7e34de65e9-kube-api-access-42vx9\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.896781 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/5e9d7317-e2f0-4262-a288-adec1afe4657-metallb-excludel2\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.896809 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12b20cff-a77f-429a-81f4-ec7e34de65e9-metrics-certs\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.896834 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgjjj\" (UniqueName: \"kubernetes.io/projected/5e9d7317-e2f0-4262-a288-adec1afe4657-kube-api-access-hgjjj\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: E1111 13:53:18.897151 4842 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 11 13:53:18 crc kubenswrapper[4842]: E1111 13:53:18.897205 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist podName:5e9d7317-e2f0-4262-a288-adec1afe4657 nodeName:}" failed. No retries permitted until 2025-11-11 13:53:19.397187382 +0000 UTC m=+1410.057477001 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist") pod "speaker-vjwsf" (UID: "5e9d7317-e2f0-4262-a288-adec1afe4657") : secret "metallb-memberlist" not found Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.898181 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/5e9d7317-e2f0-4262-a288-adec1afe4657-metallb-excludel2\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.899382 4842 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.901132 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-metrics-certs\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.909953 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/12b20cff-a77f-429a-81f4-ec7e34de65e9-cert\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.910728 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12b20cff-a77f-429a-81f4-ec7e34de65e9-metrics-certs\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.919657 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42vx9\" (UniqueName: \"kubernetes.io/projected/12b20cff-a77f-429a-81f4-ec7e34de65e9-kube-api-access-42vx9\") pod \"controller-6c7b4b5f48-z8m9r\" (UID: \"12b20cff-a77f-429a-81f4-ec7e34de65e9\") " pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.924485 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgjjj\" (UniqueName: \"kubernetes.io/projected/5e9d7317-e2f0-4262-a288-adec1afe4657-kube-api-access-hgjjj\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:18 crc kubenswrapper[4842]: I1111 13:53:18.956110 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.268897 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w"] Nov 11 13:53:19 crc kubenswrapper[4842]: W1111 13:53:19.273864 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ea46a4b_b757_47ae_a1b1_d7e82c5980e0.slice/crio-6072b179324fda9a5213a31a59ce74835afba6f5db8acb5b5ccae17f0748ff8c WatchSource:0}: Error finding container 6072b179324fda9a5213a31a59ce74835afba6f5db8acb5b5ccae17f0748ff8c: Status 404 returned error can't find the container with id 6072b179324fda9a5213a31a59ce74835afba6f5db8acb5b5ccae17f0748ff8c Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.349700 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-z8m9r"] Nov 11 13:53:19 crc kubenswrapper[4842]: W1111 13:53:19.353884 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12b20cff_a77f_429a_81f4_ec7e34de65e9.slice/crio-4e02cecc73625478d6c84e13c28009e40b0e2d2eaeee1f3c3bf1659e88e52aba WatchSource:0}: Error finding container 4e02cecc73625478d6c84e13c28009e40b0e2d2eaeee1f3c3bf1659e88e52aba: Status 404 returned error can't find the container with id 4e02cecc73625478d6c84e13c28009e40b0e2d2eaeee1f3c3bf1659e88e52aba Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.401228 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:19 crc kubenswrapper[4842]: E1111 13:53:19.401452 4842 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 11 13:53:19 crc kubenswrapper[4842]: E1111 13:53:19.401653 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist podName:5e9d7317-e2f0-4262-a288-adec1afe4657 nodeName:}" failed. No retries permitted until 2025-11-11 13:53:20.401635406 +0000 UTC m=+1411.061925025 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist") pod "speaker-vjwsf" (UID: "5e9d7317-e2f0-4262-a288-adec1afe4657") : secret "metallb-memberlist" not found Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.652085 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" event={"ID":"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0","Type":"ContainerStarted","Data":"6072b179324fda9a5213a31a59ce74835afba6f5db8acb5b5ccae17f0748ff8c"} Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.653003 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerStarted","Data":"810ea1971d068f16587941f1c83dd542ba82e6c76e12d5931274689b7bc8b045"} Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.654867 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-z8m9r" event={"ID":"12b20cff-a77f-429a-81f4-ec7e34de65e9","Type":"ContainerStarted","Data":"a0fd59125795f0202fad160e118595471bda57c70567fcd9f1e1d085836b8f27"} Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.654899 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-z8m9r" event={"ID":"12b20cff-a77f-429a-81f4-ec7e34de65e9","Type":"ContainerStarted","Data":"c4b848e8344bae1f7371675ddbb1ce26037390476d7a81ffabb3c742bcd67a17"} Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.654915 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-z8m9r" event={"ID":"12b20cff-a77f-429a-81f4-ec7e34de65e9","Type":"ContainerStarted","Data":"4e02cecc73625478d6c84e13c28009e40b0e2d2eaeee1f3c3bf1659e88e52aba"} Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.654944 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:19 crc kubenswrapper[4842]: I1111 13:53:19.671476 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-z8m9r" podStartSLOduration=1.671454061 podStartE2EDuration="1.671454061s" podCreationTimestamp="2025-11-11 13:53:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:53:19.669283972 +0000 UTC m=+1410.329573591" watchObservedRunningTime="2025-11-11 13:53:19.671454061 +0000 UTC m=+1410.331743680" Nov 11 13:53:20 crc kubenswrapper[4842]: I1111 13:53:20.413672 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:20 crc kubenswrapper[4842]: I1111 13:53:20.419986 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/5e9d7317-e2f0-4262-a288-adec1afe4657-memberlist\") pod \"speaker-vjwsf\" (UID: \"5e9d7317-e2f0-4262-a288-adec1afe4657\") " pod="metallb-system/speaker-vjwsf" Nov 11 13:53:20 crc kubenswrapper[4842]: I1111 13:53:20.434521 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-vjwsf" Nov 11 13:53:20 crc kubenswrapper[4842]: W1111 13:53:20.469906 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e9d7317_e2f0_4262_a288_adec1afe4657.slice/crio-360e785093028d192e849daaea5ab8ee71746626a15a4a3bfcd488261cf5dd25 WatchSource:0}: Error finding container 360e785093028d192e849daaea5ab8ee71746626a15a4a3bfcd488261cf5dd25: Status 404 returned error can't find the container with id 360e785093028d192e849daaea5ab8ee71746626a15a4a3bfcd488261cf5dd25 Nov 11 13:53:20 crc kubenswrapper[4842]: I1111 13:53:20.670472 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-vjwsf" event={"ID":"5e9d7317-e2f0-4262-a288-adec1afe4657","Type":"ContainerStarted","Data":"360e785093028d192e849daaea5ab8ee71746626a15a4a3bfcd488261cf5dd25"} Nov 11 13:53:21 crc kubenswrapper[4842]: I1111 13:53:21.683265 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-vjwsf" event={"ID":"5e9d7317-e2f0-4262-a288-adec1afe4657","Type":"ContainerStarted","Data":"02cec0f0b792cbf195301a961c94dbf77b50282680349bd3e1708b36686c24df"} Nov 11 13:53:21 crc kubenswrapper[4842]: I1111 13:53:21.683648 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-vjwsf" Nov 11 13:53:21 crc kubenswrapper[4842]: I1111 13:53:21.683661 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-vjwsf" event={"ID":"5e9d7317-e2f0-4262-a288-adec1afe4657","Type":"ContainerStarted","Data":"7019f10591cc56f1d7c665ef4c9bd8298450f47398a31faabb2f25a8eb80ed6d"} Nov 11 13:53:26 crc kubenswrapper[4842]: I1111 13:53:26.711596 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" event={"ID":"9ea46a4b-b757-47ae-a1b1-d7e82c5980e0","Type":"ContainerStarted","Data":"7fdfb443d1957e195c1230b54310de2a2a27c01cfd1b03f43da940aab8a0224c"} Nov 11 13:53:26 crc kubenswrapper[4842]: I1111 13:53:26.712239 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:26 crc kubenswrapper[4842]: I1111 13:53:26.713642 4842 generic.go:334] "Generic (PLEG): container finished" podID="b09ac28a-d43e-4609-b418-5e7f1d7e22a9" containerID="60e3e1be78059ebac7b36ad7f056c8461adab6323e9ceb84db794517b419b874" exitCode=0 Nov 11 13:53:26 crc kubenswrapper[4842]: I1111 13:53:26.713679 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerDied","Data":"60e3e1be78059ebac7b36ad7f056c8461adab6323e9ceb84db794517b419b874"} Nov 11 13:53:26 crc kubenswrapper[4842]: I1111 13:53:26.730748 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" podStartSLOduration=2.31100829 podStartE2EDuration="8.730724351s" podCreationTimestamp="2025-11-11 13:53:18 +0000 UTC" firstStartedPulling="2025-11-11 13:53:19.275795805 +0000 UTC m=+1409.936085424" lastFinishedPulling="2025-11-11 13:53:25.695511866 +0000 UTC m=+1416.355801485" observedRunningTime="2025-11-11 13:53:26.724472012 +0000 UTC m=+1417.384761631" watchObservedRunningTime="2025-11-11 13:53:26.730724351 +0000 UTC m=+1417.391013960" Nov 11 13:53:26 crc kubenswrapper[4842]: I1111 13:53:26.731088 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/speaker-vjwsf" podStartSLOduration=8.731082741 podStartE2EDuration="8.731082741s" podCreationTimestamp="2025-11-11 13:53:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:53:21.704903805 +0000 UTC m=+1412.365193464" watchObservedRunningTime="2025-11-11 13:53:26.731082741 +0000 UTC m=+1417.391372360" Nov 11 13:53:27 crc kubenswrapper[4842]: I1111 13:53:27.720205 4842 generic.go:334] "Generic (PLEG): container finished" podID="b09ac28a-d43e-4609-b418-5e7f1d7e22a9" containerID="495c8031fd843ec3d5857f4c72414b64919c87d1f4164bf49431063ba7019e04" exitCode=0 Nov 11 13:53:27 crc kubenswrapper[4842]: I1111 13:53:27.720320 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerDied","Data":"495c8031fd843ec3d5857f4c72414b64919c87d1f4164bf49431063ba7019e04"} Nov 11 13:53:28 crc kubenswrapper[4842]: I1111 13:53:28.729281 4842 generic.go:334] "Generic (PLEG): container finished" podID="b09ac28a-d43e-4609-b418-5e7f1d7e22a9" containerID="734958d0f00495ef7ae813940610021ac6f813ca749a853ed7ace8d94a1fda95" exitCode=0 Nov 11 13:53:28 crc kubenswrapper[4842]: I1111 13:53:28.730844 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerDied","Data":"734958d0f00495ef7ae813940610021ac6f813ca749a853ed7ace8d94a1fda95"} Nov 11 13:53:29 crc kubenswrapper[4842]: I1111 13:53:29.739666 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerStarted","Data":"520c7f3a3cd0a07b25620229a5d9c0831f062b5cc0a7630985ccb93a64dd6c5e"} Nov 11 13:53:29 crc kubenswrapper[4842]: I1111 13:53:29.740061 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerStarted","Data":"41fe94fdc3f4d7036a59f90d7c2f276a3eff6c68d79ecb3091f02acc4ab8e996"} Nov 11 13:53:29 crc kubenswrapper[4842]: I1111 13:53:29.740087 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerStarted","Data":"4fdc0739f4d3ff08fe982911f4f988c6cc9dc4786bdb87378e6262b8ac8944e3"} Nov 11 13:53:29 crc kubenswrapper[4842]: I1111 13:53:29.740216 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerStarted","Data":"673d5e1be82a15da6da59758f930f61f25b1ebd47cc734e19d05044330d6256b"} Nov 11 13:53:29 crc kubenswrapper[4842]: I1111 13:53:29.740238 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerStarted","Data":"2d50ae8739fabd2bf4d0caa22809e3b15ee831ca0ef9dde372a8178df5ea9c28"} Nov 11 13:53:29 crc kubenswrapper[4842]: I1111 13:53:29.740254 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-x8q5h" event={"ID":"b09ac28a-d43e-4609-b418-5e7f1d7e22a9","Type":"ContainerStarted","Data":"5bf0aac44b2ac27ee6eed33483a27d5904b74939de06bd68ae7c1ad65999e25a"} Nov 11 13:53:29 crc kubenswrapper[4842]: I1111 13:53:29.740573 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:30 
crc kubenswrapper[4842]: I1111 13:53:30.437905 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-vjwsf" Nov 11 13:53:30 crc kubenswrapper[4842]: I1111 13:53:30.461079 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-x8q5h" podStartSLOduration=5.782938283 podStartE2EDuration="12.461055408s" podCreationTimestamp="2025-11-11 13:53:18 +0000 UTC" firstStartedPulling="2025-11-11 13:53:19.002931392 +0000 UTC m=+1409.663221011" lastFinishedPulling="2025-11-11 13:53:25.681048507 +0000 UTC m=+1416.341338136" observedRunningTime="2025-11-11 13:53:29.766865164 +0000 UTC m=+1420.427154783" watchObservedRunningTime="2025-11-11 13:53:30.461055408 +0000 UTC m=+1421.121345027" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.103064 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-sq4nz"] Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.104410 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-sq4nz" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.109175 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.109218 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-zzfqd" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.109400 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.116542 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-sq4nz"] Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.280383 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlp6j\" (UniqueName: \"kubernetes.io/projected/d265087c-e25a-4a9b-ba0f-933c082bd78b-kube-api-access-dlp6j\") pod \"openstack-operator-index-sq4nz\" (UID: \"d265087c-e25a-4a9b-ba0f-933c082bd78b\") " pod="openstack-operators/openstack-operator-index-sq4nz" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.382347 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlp6j\" (UniqueName: \"kubernetes.io/projected/d265087c-e25a-4a9b-ba0f-933c082bd78b-kube-api-access-dlp6j\") pod \"openstack-operator-index-sq4nz\" (UID: \"d265087c-e25a-4a9b-ba0f-933c082bd78b\") " pod="openstack-operators/openstack-operator-index-sq4nz" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.404652 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlp6j\" (UniqueName: \"kubernetes.io/projected/d265087c-e25a-4a9b-ba0f-933c082bd78b-kube-api-access-dlp6j\") pod \"openstack-operator-index-sq4nz\" (UID: \"d265087c-e25a-4a9b-ba0f-933c082bd78b\") " pod="openstack-operators/openstack-operator-index-sq4nz" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.423446 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-sq4nz" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.831189 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.846376 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-sq4nz"] Nov 11 13:53:33 crc kubenswrapper[4842]: W1111 13:53:33.859327 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd265087c_e25a_4a9b_ba0f_933c082bd78b.slice/crio-ee168e14fd512ca9d3915775cc9786a7b3d6feecbd31e8061b10fcd219b4b5b5 WatchSource:0}: Error finding container ee168e14fd512ca9d3915775cc9786a7b3d6feecbd31e8061b10fcd219b4b5b5: Status 404 returned error can't find the container with id ee168e14fd512ca9d3915775cc9786a7b3d6feecbd31e8061b10fcd219b4b5b5 Nov 11 13:53:33 crc kubenswrapper[4842]: I1111 13:53:33.882386 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:34 crc kubenswrapper[4842]: I1111 13:53:34.768033 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sq4nz" event={"ID":"d265087c-e25a-4a9b-ba0f-933c082bd78b","Type":"ContainerStarted","Data":"ee168e14fd512ca9d3915775cc9786a7b3d6feecbd31e8061b10fcd219b4b5b5"} Nov 11 13:53:36 crc kubenswrapper[4842]: I1111 13:53:36.488333 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-sq4nz"] Nov 11 13:53:36 crc kubenswrapper[4842]: I1111 13:53:36.781785 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sq4nz" event={"ID":"d265087c-e25a-4a9b-ba0f-933c082bd78b","Type":"ContainerStarted","Data":"c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99"} Nov 11 13:53:36 crc kubenswrapper[4842]: I1111 13:53:36.799553 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-sq4nz" podStartSLOduration=1.326667545 podStartE2EDuration="3.799536786s" podCreationTimestamp="2025-11-11 13:53:33 +0000 UTC" firstStartedPulling="2025-11-11 13:53:33.860988334 +0000 UTC m=+1424.521277953" lastFinishedPulling="2025-11-11 13:53:36.333857575 +0000 UTC m=+1426.994147194" observedRunningTime="2025-11-11 13:53:36.79840677 +0000 UTC m=+1427.458696389" watchObservedRunningTime="2025-11-11 13:53:36.799536786 +0000 UTC m=+1427.459826405" Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.090390 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-clx45"] Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.091163 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.100588 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-clx45"] Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.242647 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tw44\" (UniqueName: \"kubernetes.io/projected/249af774-2a96-4177-bede-702ebe9025c9-kube-api-access-8tw44\") pod \"openstack-operator-index-clx45\" (UID: \"249af774-2a96-4177-bede-702ebe9025c9\") " pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.343822 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tw44\" (UniqueName: \"kubernetes.io/projected/249af774-2a96-4177-bede-702ebe9025c9-kube-api-access-8tw44\") pod \"openstack-operator-index-clx45\" (UID: \"249af774-2a96-4177-bede-702ebe9025c9\") " pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.362072 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tw44\" (UniqueName: \"kubernetes.io/projected/249af774-2a96-4177-bede-702ebe9025c9-kube-api-access-8tw44\") pod \"openstack-operator-index-clx45\" (UID: \"249af774-2a96-4177-bede-702ebe9025c9\") " pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.406016 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.788534 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-sq4nz" podUID="d265087c-e25a-4a9b-ba0f-933c082bd78b" containerName="registry-server" containerID="cri-o://c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99" gracePeriod=2 Nov 11 13:53:37 crc kubenswrapper[4842]: I1111 13:53:37.800594 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-clx45"] Nov 11 13:53:37 crc kubenswrapper[4842]: W1111 13:53:37.804624 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod249af774_2a96_4177_bede_702ebe9025c9.slice/crio-e57cd5ef8f4c95ea71906db78cbedc9c6c9cd3f57fcfef4e55fc3d3496c1221c WatchSource:0}: Error finding container e57cd5ef8f4c95ea71906db78cbedc9c6c9cd3f57fcfef4e55fc3d3496c1221c: Status 404 returned error can't find the container with id e57cd5ef8f4c95ea71906db78cbedc9c6c9cd3f57fcfef4e55fc3d3496c1221c Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.087703 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-sq4nz" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.256650 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dlp6j\" (UniqueName: \"kubernetes.io/projected/d265087c-e25a-4a9b-ba0f-933c082bd78b-kube-api-access-dlp6j\") pod \"d265087c-e25a-4a9b-ba0f-933c082bd78b\" (UID: \"d265087c-e25a-4a9b-ba0f-933c082bd78b\") " Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.261704 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d265087c-e25a-4a9b-ba0f-933c082bd78b-kube-api-access-dlp6j" (OuterVolumeSpecName: "kube-api-access-dlp6j") pod "d265087c-e25a-4a9b-ba0f-933c082bd78b" (UID: "d265087c-e25a-4a9b-ba0f-933c082bd78b"). InnerVolumeSpecName "kube-api-access-dlp6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.360184 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dlp6j\" (UniqueName: \"kubernetes.io/projected/d265087c-e25a-4a9b-ba0f-933c082bd78b-kube-api-access-dlp6j\") on node \"crc\" DevicePath \"\"" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.795406 4842 generic.go:334] "Generic (PLEG): container finished" podID="d265087c-e25a-4a9b-ba0f-933c082bd78b" containerID="c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99" exitCode=0 Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.795736 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sq4nz" event={"ID":"d265087c-e25a-4a9b-ba0f-933c082bd78b","Type":"ContainerDied","Data":"c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99"} Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.795771 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sq4nz" event={"ID":"d265087c-e25a-4a9b-ba0f-933c082bd78b","Type":"ContainerDied","Data":"ee168e14fd512ca9d3915775cc9786a7b3d6feecbd31e8061b10fcd219b4b5b5"} Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.795793 4842 scope.go:117] "RemoveContainer" containerID="c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.795905 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-sq4nz" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.800878 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-clx45" event={"ID":"249af774-2a96-4177-bede-702ebe9025c9","Type":"ContainerStarted","Data":"430f4aabad646b1bbb17dac0b99834a9de1d6cd04d56228014ab7dc39afe4af7"} Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.800924 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-clx45" event={"ID":"249af774-2a96-4177-bede-702ebe9025c9","Type":"ContainerStarted","Data":"e57cd5ef8f4c95ea71906db78cbedc9c6c9cd3f57fcfef4e55fc3d3496c1221c"} Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.819744 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-clx45" podStartSLOduration=1.7778294 podStartE2EDuration="1.819728933s" podCreationTimestamp="2025-11-11 13:53:37 +0000 UTC" firstStartedPulling="2025-11-11 13:53:37.809151289 +0000 UTC m=+1428.469440908" lastFinishedPulling="2025-11-11 13:53:37.851050822 +0000 UTC m=+1428.511340441" observedRunningTime="2025-11-11 13:53:38.818350309 +0000 UTC m=+1429.478639938" watchObservedRunningTime="2025-11-11 13:53:38.819728933 +0000 UTC m=+1429.480018542" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.825723 4842 scope.go:117] "RemoveContainer" containerID="c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99" Nov 11 13:53:38 crc kubenswrapper[4842]: E1111 13:53:38.826498 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99\": container with ID starting with c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99 not found: ID does not exist" containerID="c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.826541 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99"} err="failed to get container status \"c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99\": rpc error: code = NotFound desc = could not find container \"c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99\": container with ID starting with c846963d1d943b297d2021621dc8727df3c4e5890f623773081f8640ff6d6a99 not found: ID does not exist" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.837260 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-sq4nz"] Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.837610 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-x8q5h" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.841542 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-sq4nz"] Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.850329 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6fbb69bdf8-gf25w" Nov 11 13:53:38 crc kubenswrapper[4842]: I1111 13:53:38.960472 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-z8m9r" Nov 11 13:53:40 crc 
kubenswrapper[4842]: I1111 13:53:40.067746 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d265087c-e25a-4a9b-ba0f-933c082bd78b" path="/var/lib/kubelet/pods/d265087c-e25a-4a9b-ba0f-933c082bd78b/volumes" Nov 11 13:53:44 crc kubenswrapper[4842]: I1111 13:53:44.899088 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w6pdl"] Nov 11 13:53:44 crc kubenswrapper[4842]: E1111 13:53:44.899617 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d265087c-e25a-4a9b-ba0f-933c082bd78b" containerName="registry-server" Nov 11 13:53:44 crc kubenswrapper[4842]: I1111 13:53:44.899630 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d265087c-e25a-4a9b-ba0f-933c082bd78b" containerName="registry-server" Nov 11 13:53:44 crc kubenswrapper[4842]: I1111 13:53:44.899746 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d265087c-e25a-4a9b-ba0f-933c082bd78b" containerName="registry-server" Nov 11 13:53:44 crc kubenswrapper[4842]: I1111 13:53:44.900741 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:44 crc kubenswrapper[4842]: I1111 13:53:44.908066 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w6pdl"] Nov 11 13:53:44 crc kubenswrapper[4842]: I1111 13:53:44.961190 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:53:44 crc kubenswrapper[4842]: I1111 13:53:44.961256 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.045648 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-utilities\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.045759 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-catalog-content\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.045800 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzlnx\" (UniqueName: \"kubernetes.io/projected/1ca585f6-b1ff-4588-9b0b-260e5e37c790-kube-api-access-pzlnx\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.146837 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzlnx\" (UniqueName: 
\"kubernetes.io/projected/1ca585f6-b1ff-4588-9b0b-260e5e37c790-kube-api-access-pzlnx\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.146902 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-utilities\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.146967 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-catalog-content\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.147424 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-catalog-content\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.147527 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-utilities\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.167020 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzlnx\" (UniqueName: \"kubernetes.io/projected/1ca585f6-b1ff-4588-9b0b-260e5e37c790-kube-api-access-pzlnx\") pod \"redhat-operators-w6pdl\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.225305 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.691014 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w6pdl"] Nov 11 13:53:45 crc kubenswrapper[4842]: I1111 13:53:45.845522 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6pdl" event={"ID":"1ca585f6-b1ff-4588-9b0b-260e5e37c790","Type":"ContainerStarted","Data":"fe586743f0811d2de76a4c97758e6c391df6367c6eceef55bbac9e3056642d43"} Nov 11 13:53:46 crc kubenswrapper[4842]: I1111 13:53:46.853948 4842 generic.go:334] "Generic (PLEG): container finished" podID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerID="7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b" exitCode=0 Nov 11 13:53:46 crc kubenswrapper[4842]: I1111 13:53:46.854133 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6pdl" event={"ID":"1ca585f6-b1ff-4588-9b0b-260e5e37c790","Type":"ContainerDied","Data":"7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b"} Nov 11 13:53:47 crc kubenswrapper[4842]: I1111 13:53:47.406529 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:47 crc kubenswrapper[4842]: I1111 13:53:47.406588 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:47 crc kubenswrapper[4842]: I1111 13:53:47.429854 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:47 crc kubenswrapper[4842]: I1111 13:53:47.861337 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6pdl" event={"ID":"1ca585f6-b1ff-4588-9b0b-260e5e37c790","Type":"ContainerStarted","Data":"906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0"} Nov 11 13:53:47 crc kubenswrapper[4842]: I1111 13:53:47.885301 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-clx45" Nov 11 13:53:48 crc kubenswrapper[4842]: I1111 13:53:48.868137 4842 generic.go:334] "Generic (PLEG): container finished" podID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerID="906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0" exitCode=0 Nov 11 13:53:48 crc kubenswrapper[4842]: I1111 13:53:48.868205 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6pdl" event={"ID":"1ca585f6-b1ff-4588-9b0b-260e5e37c790","Type":"ContainerDied","Data":"906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0"} Nov 11 13:53:49 crc kubenswrapper[4842]: I1111 13:53:49.874361 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6pdl" event={"ID":"1ca585f6-b1ff-4588-9b0b-260e5e37c790","Type":"ContainerStarted","Data":"08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f"} Nov 11 13:53:49 crc kubenswrapper[4842]: I1111 13:53:49.895321 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w6pdl" podStartSLOduration=3.394962658 podStartE2EDuration="5.895301673s" podCreationTimestamp="2025-11-11 13:53:44 +0000 UTC" firstStartedPulling="2025-11-11 13:53:46.856331625 +0000 UTC m=+1437.516621244" lastFinishedPulling="2025-11-11 
13:53:49.35667064 +0000 UTC m=+1440.016960259" observedRunningTime="2025-11-11 13:53:49.891500084 +0000 UTC m=+1440.551789703" watchObservedRunningTime="2025-11-11 13:53:49.895301673 +0000 UTC m=+1440.555591292" Nov 11 13:53:49 crc kubenswrapper[4842]: I1111 13:53:49.933416 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm"] Nov 11 13:53:49 crc kubenswrapper[4842]: I1111 13:53:49.934575 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:49 crc kubenswrapper[4842]: I1111 13:53:49.937663 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-qs4m8" Nov 11 13:53:49 crc kubenswrapper[4842]: I1111 13:53:49.944283 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm"] Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.107974 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-util\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.108116 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-bundle\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.108150 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvwns\" (UniqueName: \"kubernetes.io/projected/17e735d4-82c6-4083-bd01-382013995cc2-kube-api-access-gvwns\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.210888 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-bundle\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.210975 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvwns\" (UniqueName: \"kubernetes.io/projected/17e735d4-82c6-4083-bd01-382013995cc2-kube-api-access-gvwns\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.211019 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-util\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.211328 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-bundle\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.211681 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-util\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.231528 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvwns\" (UniqueName: \"kubernetes.io/projected/17e735d4-82c6-4083-bd01-382013995cc2-kube-api-access-gvwns\") pod \"eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.250008 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-qs4m8" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.258488 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.682402 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm"] Nov 11 13:53:50 crc kubenswrapper[4842]: W1111 13:53:50.685916 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17e735d4_82c6_4083_bd01_382013995cc2.slice/crio-1c6987088ecbf79a96dd0088ca32298331b1f70802be9526fd75159c76520151 WatchSource:0}: Error finding container 1c6987088ecbf79a96dd0088ca32298331b1f70802be9526fd75159c76520151: Status 404 returned error can't find the container with id 1c6987088ecbf79a96dd0088ca32298331b1f70802be9526fd75159c76520151 Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.880397 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" event={"ID":"17e735d4-82c6-4083-bd01-382013995cc2","Type":"ContainerStarted","Data":"5059199d4d49bcefc8e5108fb5eed4330c5a30b91bcaee7b6a69690262a017a6"} Nov 11 13:53:50 crc kubenswrapper[4842]: I1111 13:53:50.880457 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" event={"ID":"17e735d4-82c6-4083-bd01-382013995cc2","Type":"ContainerStarted","Data":"1c6987088ecbf79a96dd0088ca32298331b1f70802be9526fd75159c76520151"} Nov 11 13:53:51 crc kubenswrapper[4842]: I1111 13:53:51.892408 4842 generic.go:334] "Generic (PLEG): container finished" podID="17e735d4-82c6-4083-bd01-382013995cc2" containerID="5059199d4d49bcefc8e5108fb5eed4330c5a30b91bcaee7b6a69690262a017a6" exitCode=0 Nov 11 13:53:51 crc kubenswrapper[4842]: I1111 13:53:51.892453 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" event={"ID":"17e735d4-82c6-4083-bd01-382013995cc2","Type":"ContainerDied","Data":"5059199d4d49bcefc8e5108fb5eed4330c5a30b91bcaee7b6a69690262a017a6"} Nov 11 13:53:52 crc kubenswrapper[4842]: I1111 13:53:52.899122 4842 generic.go:334] "Generic (PLEG): container finished" podID="17e735d4-82c6-4083-bd01-382013995cc2" containerID="55e5294b10fcab710541c4c263248900077df49ada356aeeeebc1ac275c47c6a" exitCode=0 Nov 11 13:53:52 crc kubenswrapper[4842]: I1111 13:53:52.899177 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" event={"ID":"17e735d4-82c6-4083-bd01-382013995cc2","Type":"ContainerDied","Data":"55e5294b10fcab710541c4c263248900077df49ada356aeeeebc1ac275c47c6a"} Nov 11 13:53:53 crc kubenswrapper[4842]: I1111 13:53:53.906121 4842 generic.go:334] "Generic (PLEG): container finished" podID="17e735d4-82c6-4083-bd01-382013995cc2" containerID="c3c22ace5f53a438affd038a39904a7b70d5b554c5dcc4fc9be1f22a93aab4e3" exitCode=0 Nov 11 13:53:53 crc kubenswrapper[4842]: I1111 13:53:53.906200 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" event={"ID":"17e735d4-82c6-4083-bd01-382013995cc2","Type":"ContainerDied","Data":"c3c22ace5f53a438affd038a39904a7b70d5b554c5dcc4fc9be1f22a93aab4e3"} Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.122334 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.184325 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvwns\" (UniqueName: \"kubernetes.io/projected/17e735d4-82c6-4083-bd01-382013995cc2-kube-api-access-gvwns\") pod \"17e735d4-82c6-4083-bd01-382013995cc2\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.184665 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-util\") pod \"17e735d4-82c6-4083-bd01-382013995cc2\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.184707 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-bundle\") pod \"17e735d4-82c6-4083-bd01-382013995cc2\" (UID: \"17e735d4-82c6-4083-bd01-382013995cc2\") " Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.185511 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-bundle" (OuterVolumeSpecName: "bundle") pod "17e735d4-82c6-4083-bd01-382013995cc2" (UID: "17e735d4-82c6-4083-bd01-382013995cc2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.189864 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17e735d4-82c6-4083-bd01-382013995cc2-kube-api-access-gvwns" (OuterVolumeSpecName: "kube-api-access-gvwns") pod "17e735d4-82c6-4083-bd01-382013995cc2" (UID: "17e735d4-82c6-4083-bd01-382013995cc2"). InnerVolumeSpecName "kube-api-access-gvwns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.201207 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-util" (OuterVolumeSpecName: "util") pod "17e735d4-82c6-4083-bd01-382013995cc2" (UID: "17e735d4-82c6-4083-bd01-382013995cc2"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.226360 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.226410 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.267005 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.286655 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvwns\" (UniqueName: \"kubernetes.io/projected/17e735d4-82c6-4083-bd01-382013995cc2-kube-api-access-gvwns\") on node \"crc\" DevicePath \"\"" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.286694 4842 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-util\") on node \"crc\" DevicePath \"\"" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.286707 4842 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17e735d4-82c6-4083-bd01-382013995cc2-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.918176 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" event={"ID":"17e735d4-82c6-4083-bd01-382013995cc2","Type":"ContainerDied","Data":"1c6987088ecbf79a96dd0088ca32298331b1f70802be9526fd75159c76520151"} Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.918218 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c6987088ecbf79a96dd0088ca32298331b1f70802be9526fd75159c76520151" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.918222 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm" Nov 11 13:53:55 crc kubenswrapper[4842]: I1111 13:53:55.954411 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:57 crc kubenswrapper[4842]: I1111 13:53:57.887428 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w6pdl"] Nov 11 13:53:57 crc kubenswrapper[4842]: I1111 13:53:57.929004 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w6pdl" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="registry-server" containerID="cri-o://08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f" gracePeriod=2 Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.282265 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.325354 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-catalog-content\") pod \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.325396 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-utilities\") pod \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.325497 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzlnx\" (UniqueName: \"kubernetes.io/projected/1ca585f6-b1ff-4588-9b0b-260e5e37c790-kube-api-access-pzlnx\") pod \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\" (UID: \"1ca585f6-b1ff-4588-9b0b-260e5e37c790\") " Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.326518 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-utilities" (OuterVolumeSpecName: "utilities") pod "1ca585f6-b1ff-4588-9b0b-260e5e37c790" (UID: "1ca585f6-b1ff-4588-9b0b-260e5e37c790"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.330455 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ca585f6-b1ff-4588-9b0b-260e5e37c790-kube-api-access-pzlnx" (OuterVolumeSpecName: "kube-api-access-pzlnx") pod "1ca585f6-b1ff-4588-9b0b-260e5e37c790" (UID: "1ca585f6-b1ff-4588-9b0b-260e5e37c790"). InnerVolumeSpecName "kube-api-access-pzlnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.426937 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzlnx\" (UniqueName: \"kubernetes.io/projected/1ca585f6-b1ff-4588-9b0b-260e5e37c790-kube-api-access-pzlnx\") on node \"crc\" DevicePath \"\"" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.427000 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.946255 4842 generic.go:334] "Generic (PLEG): container finished" podID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerID="08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f" exitCode=0 Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.946309 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6pdl" event={"ID":"1ca585f6-b1ff-4588-9b0b-260e5e37c790","Type":"ContainerDied","Data":"08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f"} Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.946372 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w6pdl" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.946391 4842 scope.go:117] "RemoveContainer" containerID="08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.946377 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w6pdl" event={"ID":"1ca585f6-b1ff-4588-9b0b-260e5e37c790","Type":"ContainerDied","Data":"fe586743f0811d2de76a4c97758e6c391df6367c6eceef55bbac9e3056642d43"} Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.961150 4842 scope.go:117] "RemoveContainer" containerID="906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.976696 4842 scope.go:117] "RemoveContainer" containerID="7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.997961 4842 scope.go:117] "RemoveContainer" containerID="08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f" Nov 11 13:53:58 crc kubenswrapper[4842]: E1111 13:53:58.998404 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f\": container with ID starting with 08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f not found: ID does not exist" containerID="08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.998457 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f"} err="failed to get container status \"08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f\": rpc error: code = NotFound desc = could not find container \"08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f\": container with ID starting with 08ba9a17c0d359290d66e5b4da120a3203b3c3cd79ef235157b52e05511f830f not found: ID does not exist" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.998490 4842 scope.go:117] "RemoveContainer" containerID="906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0" Nov 11 13:53:58 crc kubenswrapper[4842]: E1111 13:53:58.998867 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0\": container with ID starting with 906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0 not found: ID does not exist" containerID="906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.998921 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0"} err="failed to get container status \"906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0\": rpc error: code = NotFound desc = could not find container \"906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0\": container with ID starting with 906e8ffcb231eb147c8e09cf34b0f52abb11bb42eb7c9745379b6722339ff7d0 not found: ID does not exist" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.998962 4842 scope.go:117] "RemoveContainer" 
containerID="7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b" Nov 11 13:53:58 crc kubenswrapper[4842]: E1111 13:53:58.999604 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b\": container with ID starting with 7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b not found: ID does not exist" containerID="7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b" Nov 11 13:53:58 crc kubenswrapper[4842]: I1111 13:53:58.999633 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b"} err="failed to get container status \"7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b\": rpc error: code = NotFound desc = could not find container \"7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b\": container with ID starting with 7496ab82a98ee11ff3005d16134dae48756c979678299e9ee4fbf75952eb7f6b not found: ID does not exist" Nov 11 13:53:59 crc kubenswrapper[4842]: I1111 13:53:59.416081 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1ca585f6-b1ff-4588-9b0b-260e5e37c790" (UID: "1ca585f6-b1ff-4588-9b0b-260e5e37c790"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:53:59 crc kubenswrapper[4842]: I1111 13:53:59.441257 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ca585f6-b1ff-4588-9b0b-260e5e37c790-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:53:59 crc kubenswrapper[4842]: I1111 13:53:59.576128 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w6pdl"] Nov 11 13:53:59 crc kubenswrapper[4842]: I1111 13:53:59.579641 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w6pdl"] Nov 11 13:54:00 crc kubenswrapper[4842]: I1111 13:54:00.067831 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" path="/var/lib/kubelet/pods/1ca585f6-b1ff-4588-9b0b-260e5e37c790/volumes" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224218 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj"] Nov 11 13:54:02 crc kubenswrapper[4842]: E1111 13:54:02.224470 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17e735d4-82c6-4083-bd01-382013995cc2" containerName="pull" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224481 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="17e735d4-82c6-4083-bd01-382013995cc2" containerName="pull" Nov 11 13:54:02 crc kubenswrapper[4842]: E1111 13:54:02.224495 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17e735d4-82c6-4083-bd01-382013995cc2" containerName="extract" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224502 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="17e735d4-82c6-4083-bd01-382013995cc2" containerName="extract" Nov 11 13:54:02 crc kubenswrapper[4842]: E1111 13:54:02.224515 4842 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="17e735d4-82c6-4083-bd01-382013995cc2" containerName="util" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224520 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="17e735d4-82c6-4083-bd01-382013995cc2" containerName="util" Nov 11 13:54:02 crc kubenswrapper[4842]: E1111 13:54:02.224530 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="extract-content" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224535 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="extract-content" Nov 11 13:54:02 crc kubenswrapper[4842]: E1111 13:54:02.224546 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="registry-server" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224551 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="registry-server" Nov 11 13:54:02 crc kubenswrapper[4842]: E1111 13:54:02.224560 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="extract-utilities" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224567 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="extract-utilities" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224673 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="17e735d4-82c6-4083-bd01-382013995cc2" containerName="extract" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.224695 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ca585f6-b1ff-4588-9b0b-260e5e37c790" containerName="registry-server" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.225360 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.227166 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-4qwn8" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.295143 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccft2\" (UniqueName: \"kubernetes.io/projected/e928b7e0-ef80-4622-9bec-93c14a6c734d-kube-api-access-ccft2\") pod \"openstack-operator-controller-operator-77d445568-tvtkj\" (UID: \"e928b7e0-ef80-4622-9bec-93c14a6c734d\") " pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.328304 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj"] Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.396050 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccft2\" (UniqueName: \"kubernetes.io/projected/e928b7e0-ef80-4622-9bec-93c14a6c734d-kube-api-access-ccft2\") pod \"openstack-operator-controller-operator-77d445568-tvtkj\" (UID: \"e928b7e0-ef80-4622-9bec-93c14a6c734d\") " pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.424785 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccft2\" (UniqueName: \"kubernetes.io/projected/e928b7e0-ef80-4622-9bec-93c14a6c734d-kube-api-access-ccft2\") pod \"openstack-operator-controller-operator-77d445568-tvtkj\" (UID: \"e928b7e0-ef80-4622-9bec-93c14a6c734d\") " pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.542738 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" Nov 11 13:54:02 crc kubenswrapper[4842]: I1111 13:54:02.965980 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj"] Nov 11 13:54:03 crc kubenswrapper[4842]: I1111 13:54:03.976294 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" event={"ID":"e928b7e0-ef80-4622-9bec-93c14a6c734d","Type":"ContainerStarted","Data":"46baed2dc90b7e9008bac66917dfb99842f7fca13f1f9b35faf161238541a55b"} Nov 11 13:54:07 crc kubenswrapper[4842]: I1111 13:54:07.003892 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" event={"ID":"e928b7e0-ef80-4622-9bec-93c14a6c734d","Type":"ContainerStarted","Data":"3f1ab8b38f9dafe2b8922b7788a171a35c86120af884e3d2c2227390bb58674c"} Nov 11 13:54:09 crc kubenswrapper[4842]: I1111 13:54:09.024394 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" event={"ID":"e928b7e0-ef80-4622-9bec-93c14a6c734d","Type":"ContainerStarted","Data":"648f2ce3b1897797ad74165c1adace84dfa03208d626f4d819d92315ef5e2bd1"} Nov 11 13:54:09 crc kubenswrapper[4842]: I1111 13:54:09.024958 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" Nov 11 13:54:09 crc kubenswrapper[4842]: I1111 13:54:09.052282 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" podStartSLOduration=1.70377722 podStartE2EDuration="7.05226439s" podCreationTimestamp="2025-11-11 13:54:02 +0000 UTC" firstStartedPulling="2025-11-11 13:54:02.983301185 +0000 UTC m=+1453.643590804" lastFinishedPulling="2025-11-11 13:54:08.331788355 +0000 UTC m=+1458.992077974" observedRunningTime="2025-11-11 13:54:09.047367356 +0000 UTC m=+1459.707656975" watchObservedRunningTime="2025-11-11 13:54:09.05226439 +0000 UTC m=+1459.712554009" Nov 11 13:54:12 crc kubenswrapper[4842]: I1111 13:54:12.546270 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-77d445568-tvtkj" Nov 11 13:54:14 crc kubenswrapper[4842]: I1111 13:54:14.961568 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:54:14 crc kubenswrapper[4842]: I1111 13:54:14.961645 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:54:14 crc kubenswrapper[4842]: I1111 13:54:14.961700 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:54:14 crc kubenswrapper[4842]: I1111 13:54:14.962597 4842 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3cf43482baec1eed99bfa20a2dd7fb680f42c7e2fc6a6ff6e8095af8a95ac4ec"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 13:54:14 crc kubenswrapper[4842]: I1111 13:54:14.962692 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://3cf43482baec1eed99bfa20a2dd7fb680f42c7e2fc6a6ff6e8095af8a95ac4ec" gracePeriod=600 Nov 11 13:54:16 crc kubenswrapper[4842]: I1111 13:54:16.065680 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="3cf43482baec1eed99bfa20a2dd7fb680f42c7e2fc6a6ff6e8095af8a95ac4ec" exitCode=0 Nov 11 13:54:16 crc kubenswrapper[4842]: I1111 13:54:16.065839 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"3cf43482baec1eed99bfa20a2dd7fb680f42c7e2fc6a6ff6e8095af8a95ac4ec"} Nov 11 13:54:16 crc kubenswrapper[4842]: I1111 13:54:16.066271 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d"} Nov 11 13:54:16 crc kubenswrapper[4842]: I1111 13:54:16.066294 4842 scope.go:117] "RemoveContainer" containerID="1f1e7514c76335ccdcd4dfabef70e43a6a62ffa8315747869ab943efa0eff321" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.659933 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nbb5w"] Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.662428 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.673789 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nbb5w"] Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.785455 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-utilities\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.785544 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-catalog-content\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.785592 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcx9m\" (UniqueName: \"kubernetes.io/projected/a7e10df1-f131-4fba-99e0-117a30f01ba0-kube-api-access-gcx9m\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.887231 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcx9m\" (UniqueName: \"kubernetes.io/projected/a7e10df1-f131-4fba-99e0-117a30f01ba0-kube-api-access-gcx9m\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.887337 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-utilities\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.887379 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-catalog-content\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.887995 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-catalog-content\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.888196 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-utilities\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:24 crc kubenswrapper[4842]: I1111 13:54:24.915647 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gcx9m\" (UniqueName: \"kubernetes.io/projected/a7e10df1-f131-4fba-99e0-117a30f01ba0-kube-api-access-gcx9m\") pod \"community-operators-nbb5w\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:25 crc kubenswrapper[4842]: I1111 13:54:25.043078 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:25 crc kubenswrapper[4842]: I1111 13:54:25.403871 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nbb5w"] Nov 11 13:54:25 crc kubenswrapper[4842]: W1111 13:54:25.407188 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7e10df1_f131_4fba_99e0_117a30f01ba0.slice/crio-6c0de18bb5cbcc08898232186d626f5285810842bbeb2d943c0b27ec7685009d WatchSource:0}: Error finding container 6c0de18bb5cbcc08898232186d626f5285810842bbeb2d943c0b27ec7685009d: Status 404 returned error can't find the container with id 6c0de18bb5cbcc08898232186d626f5285810842bbeb2d943c0b27ec7685009d Nov 11 13:54:26 crc kubenswrapper[4842]: I1111 13:54:26.136354 4842 generic.go:334] "Generic (PLEG): container finished" podID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerID="9c1748eeb54451d9dcce355eaeeeb5f252b59baa3ef4176317306ff20904f36d" exitCode=0 Nov 11 13:54:26 crc kubenswrapper[4842]: I1111 13:54:26.136407 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nbb5w" event={"ID":"a7e10df1-f131-4fba-99e0-117a30f01ba0","Type":"ContainerDied","Data":"9c1748eeb54451d9dcce355eaeeeb5f252b59baa3ef4176317306ff20904f36d"} Nov 11 13:54:26 crc kubenswrapper[4842]: I1111 13:54:26.136437 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nbb5w" event={"ID":"a7e10df1-f131-4fba-99e0-117a30f01ba0","Type":"ContainerStarted","Data":"6c0de18bb5cbcc08898232186d626f5285810842bbeb2d943c0b27ec7685009d"} Nov 11 13:54:27 crc kubenswrapper[4842]: I1111 13:54:27.147406 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nbb5w" event={"ID":"a7e10df1-f131-4fba-99e0-117a30f01ba0","Type":"ContainerStarted","Data":"8c2ea3ea98fdcabf10cadc8acca4ebfa975582936fa3d551174024035c05dd78"} Nov 11 13:54:28 crc kubenswrapper[4842]: I1111 13:54:28.153795 4842 generic.go:334] "Generic (PLEG): container finished" podID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerID="8c2ea3ea98fdcabf10cadc8acca4ebfa975582936fa3d551174024035c05dd78" exitCode=0 Nov 11 13:54:28 crc kubenswrapper[4842]: I1111 13:54:28.154042 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nbb5w" event={"ID":"a7e10df1-f131-4fba-99e0-117a30f01ba0","Type":"ContainerDied","Data":"8c2ea3ea98fdcabf10cadc8acca4ebfa975582936fa3d551174024035c05dd78"} Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.024082 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.025280 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.028005 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-mkfvn" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.028176 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.029170 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.030358 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-mw8b2" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.036785 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.038176 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.044942 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-hxm86" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.045097 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.048431 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.054694 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.060922 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.061875 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.064360 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-g8qbf" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.081551 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.090961 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.095089 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.096473 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.100531 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.105475 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-n6clf" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.105543 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-fgzz4" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.134161 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.160318 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.166929 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2dbc\" (UniqueName: \"kubernetes.io/projected/b7462081-0162-4bd6-96fe-23a8c29df0db-kube-api-access-v2dbc\") pod \"cinder-operator-controller-manager-8dffd86b7-rldzd\" (UID: \"b7462081-0162-4bd6-96fe-23a8c29df0db\") " pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.167011 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rws4\" (UniqueName: \"kubernetes.io/projected/b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e-kube-api-access-5rws4\") pod \"barbican-operator-controller-manager-6999776966-pnbdh\" (UID: \"b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e\") " pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.167033 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm4ws\" (UniqueName: \"kubernetes.io/projected/7c7223fc-d7fe-416d-8c4f-872f399ad3f3-kube-api-access-fm4ws\") pod \"glance-operator-controller-manager-774b65955b-mtvmd\" (UID: \"7c7223fc-d7fe-416d-8c4f-872f399ad3f3\") " pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.167055 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pts6\" (UniqueName: \"kubernetes.io/projected/d200f269-63a1-4cee-820f-1b42538f1fb9-kube-api-access-2pts6\") pod \"horizon-operator-controller-manager-7d445c6d8b-bqk7b\" (UID: \"d200f269-63a1-4cee-820f-1b42538f1fb9\") " pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.167073 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qpb5\" (UniqueName: \"kubernetes.io/projected/c0dc7222-a511-4010-b7ad-f1d4716958f8-kube-api-access-5qpb5\") pod \"designate-operator-controller-manager-67455b77fb-8g2hw\" (UID: \"c0dc7222-a511-4010-b7ad-f1d4716958f8\") " pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" Nov 11 13:54:29 crc 
kubenswrapper[4842]: I1111 13:54:29.167123 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mgrv\" (UniqueName: \"kubernetes.io/projected/00cc5552-7130-40ca-ab43-b6525d3199f4-kube-api-access-9mgrv\") pod \"heat-operator-controller-manager-6b57d4f86f-pkdl8\" (UID: \"00cc5552-7130-40ca-ab43-b6525d3199f4\") " pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.175546 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.176751 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.180252 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.184440 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qvzz5" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.184600 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.187691 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.188978 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.190406 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-bb6nq" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.199183 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.200600 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.203355 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nbb5w" event={"ID":"a7e10df1-f131-4fba-99e0-117a30f01ba0","Type":"ContainerStarted","Data":"15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04"} Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.204337 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-d7gzr" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.215645 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.216960 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.221870 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-2s6gc" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.225263 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.231035 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.241312 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.244856 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.247436 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.247859 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-lzdzd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.267975 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268733 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rws4\" (UniqueName: \"kubernetes.io/projected/b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e-kube-api-access-5rws4\") pod \"barbican-operator-controller-manager-6999776966-pnbdh\" (UID: \"b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e\") " pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268759 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm4ws\" (UniqueName: \"kubernetes.io/projected/7c7223fc-d7fe-416d-8c4f-872f399ad3f3-kube-api-access-fm4ws\") pod \"glance-operator-controller-manager-774b65955b-mtvmd\" (UID: \"7c7223fc-d7fe-416d-8c4f-872f399ad3f3\") " pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268788 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqszm\" (UniqueName: \"kubernetes.io/projected/d90f01be-5138-44bc-8330-0e8ee3914ba8-kube-api-access-tqszm\") pod \"keystone-operator-controller-manager-5c68d88c57-2k92j\" (UID: \"d90f01be-5138-44bc-8330-0e8ee3914ba8\") " pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268803 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmzmw\" (UniqueName: \"kubernetes.io/projected/8976057b-f908-4295-93a2-0bd3bb1441da-kube-api-access-lmzmw\") pod \"ironic-operator-controller-manager-8444f8f688-gl575\" (UID: \"8976057b-f908-4295-93a2-0bd3bb1441da\") " 
pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268819 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pts6\" (UniqueName: \"kubernetes.io/projected/d200f269-63a1-4cee-820f-1b42538f1fb9-kube-api-access-2pts6\") pod \"horizon-operator-controller-manager-7d445c6d8b-bqk7b\" (UID: \"d200f269-63a1-4cee-820f-1b42538f1fb9\") " pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268837 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qpb5\" (UniqueName: \"kubernetes.io/projected/c0dc7222-a511-4010-b7ad-f1d4716958f8-kube-api-access-5qpb5\") pod \"designate-operator-controller-manager-67455b77fb-8g2hw\" (UID: \"c0dc7222-a511-4010-b7ad-f1d4716958f8\") " pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268856 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c3ed8b6-85b5-402b-994a-ca068cc5a357-cert\") pod \"infra-operator-controller-manager-64cbcd8bcf-b9q8b\" (UID: \"6c3ed8b6-85b5-402b-994a-ca068cc5a357\") " pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268889 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qmzb\" (UniqueName: \"kubernetes.io/projected/6c3ed8b6-85b5-402b-994a-ca068cc5a357-kube-api-access-7qmzb\") pod \"infra-operator-controller-manager-64cbcd8bcf-b9q8b\" (UID: \"6c3ed8b6-85b5-402b-994a-ca068cc5a357\") " pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268907 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mgrv\" (UniqueName: \"kubernetes.io/projected/00cc5552-7130-40ca-ab43-b6525d3199f4-kube-api-access-9mgrv\") pod \"heat-operator-controller-manager-6b57d4f86f-pkdl8\" (UID: \"00cc5552-7130-40ca-ab43-b6525d3199f4\") " pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.268953 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2dbc\" (UniqueName: \"kubernetes.io/projected/b7462081-0162-4bd6-96fe-23a8c29df0db-kube-api-access-v2dbc\") pod \"cinder-operator-controller-manager-8dffd86b7-rldzd\" (UID: \"b7462081-0162-4bd6-96fe-23a8c29df0db\") " pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.291255 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.292385 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.295235 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qpb5\" (UniqueName: \"kubernetes.io/projected/c0dc7222-a511-4010-b7ad-f1d4716958f8-kube-api-access-5qpb5\") pod \"designate-operator-controller-manager-67455b77fb-8g2hw\" (UID: \"c0dc7222-a511-4010-b7ad-f1d4716958f8\") " pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.295726 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mgrv\" (UniqueName: \"kubernetes.io/projected/00cc5552-7130-40ca-ab43-b6525d3199f4-kube-api-access-9mgrv\") pod \"heat-operator-controller-manager-6b57d4f86f-pkdl8\" (UID: \"00cc5552-7130-40ca-ab43-b6525d3199f4\") " pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.300619 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-njkjs" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.305239 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rws4\" (UniqueName: \"kubernetes.io/projected/b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e-kube-api-access-5rws4\") pod \"barbican-operator-controller-manager-6999776966-pnbdh\" (UID: \"b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e\") " pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.309566 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.310694 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.312390 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pts6\" (UniqueName: \"kubernetes.io/projected/d200f269-63a1-4cee-820f-1b42538f1fb9-kube-api-access-2pts6\") pod \"horizon-operator-controller-manager-7d445c6d8b-bqk7b\" (UID: \"d200f269-63a1-4cee-820f-1b42538f1fb9\") " pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.313115 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-v6nps" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.313546 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm4ws\" (UniqueName: \"kubernetes.io/projected/7c7223fc-d7fe-416d-8c4f-872f399ad3f3-kube-api-access-fm4ws\") pod \"glance-operator-controller-manager-774b65955b-mtvmd\" (UID: \"7c7223fc-d7fe-416d-8c4f-872f399ad3f3\") " pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.315692 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2dbc\" (UniqueName: \"kubernetes.io/projected/b7462081-0162-4bd6-96fe-23a8c29df0db-kube-api-access-v2dbc\") pod \"cinder-operator-controller-manager-8dffd86b7-rldzd\" (UID: \"b7462081-0162-4bd6-96fe-23a8c29df0db\") " pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.328167 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.329822 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.331476 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-cwsmb" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.337422 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.347686 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.349395 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.373053 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.377448 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5qvn\" (UniqueName: \"kubernetes.io/projected/828ba013-e0fe-452c-a8ae-2dbb8e9436b4-kube-api-access-g5qvn\") pod \"nova-operator-controller-manager-8588b44bb6-2m4gd\" (UID: \"828ba013-e0fe-452c-a8ae-2dbb8e9436b4\") " pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.377499 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4d4c\" (UniqueName: \"kubernetes.io/projected/20a746aa-153e-4ad3-afb7-e5d771927b18-kube-api-access-s4d4c\") pod \"mariadb-operator-controller-manager-697bcb486c-xcdsm\" (UID: \"20a746aa-153e-4ad3-afb7-e5d771927b18\") " pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.377585 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqszm\" (UniqueName: \"kubernetes.io/projected/d90f01be-5138-44bc-8330-0e8ee3914ba8-kube-api-access-tqszm\") pod \"keystone-operator-controller-manager-5c68d88c57-2k92j\" (UID: \"d90f01be-5138-44bc-8330-0e8ee3914ba8\") " pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.377663 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmzmw\" (UniqueName: \"kubernetes.io/projected/8976057b-f908-4295-93a2-0bd3bb1441da-kube-api-access-lmzmw\") pod \"ironic-operator-controller-manager-8444f8f688-gl575\" (UID: \"8976057b-f908-4295-93a2-0bd3bb1441da\") " pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.380833 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.383009 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c3ed8b6-85b5-402b-994a-ca068cc5a357-cert\") pod \"infra-operator-controller-manager-64cbcd8bcf-b9q8b\" (UID: \"6c3ed8b6-85b5-402b-994a-ca068cc5a357\") " pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.383114 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fbsr\" (UniqueName: \"kubernetes.io/projected/8458ea94-f568-498e-9f67-f1a31cdb2fdf-kube-api-access-9fbsr\") pod \"octavia-operator-controller-manager-97dc668d8-scbz4\" (UID: \"8458ea94-f568-498e-9f67-f1a31cdb2fdf\") " pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.384698 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qmzb\" (UniqueName: \"kubernetes.io/projected/6c3ed8b6-85b5-402b-994a-ca068cc5a357-kube-api-access-7qmzb\") pod \"infra-operator-controller-manager-64cbcd8bcf-b9q8b\" (UID: \"6c3ed8b6-85b5-402b-994a-ca068cc5a357\") " pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc 
kubenswrapper[4842]: I1111 13:54:29.384813 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm88s\" (UniqueName: \"kubernetes.io/projected/fae3f9e9-7308-454c-80e2-c836cfa04a44-kube-api-access-fm88s\") pod \"manila-operator-controller-manager-67c5b7495b-2sfch\" (UID: \"fae3f9e9-7308-454c-80e2-c836cfa04a44\") " pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" Nov 11 13:54:29 crc kubenswrapper[4842]: E1111 13:54:29.386155 4842 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 11 13:54:29 crc kubenswrapper[4842]: E1111 13:54:29.386329 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6c3ed8b6-85b5-402b-994a-ca068cc5a357-cert podName:6c3ed8b6-85b5-402b-994a-ca068cc5a357 nodeName:}" failed. No retries permitted until 2025-11-11 13:54:29.886307898 +0000 UTC m=+1480.546597517 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6c3ed8b6-85b5-402b-994a-ca068cc5a357-cert") pod "infra-operator-controller-manager-64cbcd8bcf-b9q8b" (UID: "6c3ed8b6-85b5-402b-994a-ca068cc5a357") : secret "infra-operator-webhook-server-cert" not found Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.400972 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nbb5w" podStartSLOduration=2.927726913 podStartE2EDuration="5.400949671s" podCreationTimestamp="2025-11-11 13:54:24 +0000 UTC" firstStartedPulling="2025-11-11 13:54:26.137725112 +0000 UTC m=+1476.798014731" lastFinishedPulling="2025-11-11 13:54:28.61094787 +0000 UTC m=+1479.271237489" observedRunningTime="2025-11-11 13:54:29.307831341 +0000 UTC m=+1479.968120960" watchObservedRunningTime="2025-11-11 13:54:29.400949671 +0000 UTC m=+1480.061239280" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.424575 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.436406 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.436964 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.443865 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmzmw\" (UniqueName: \"kubernetes.io/projected/8976057b-f908-4295-93a2-0bd3bb1441da-kube-api-access-lmzmw\") pod \"ironic-operator-controller-manager-8444f8f688-gl575\" (UID: \"8976057b-f908-4295-93a2-0bd3bb1441da\") " pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.445421 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqszm\" (UniqueName: \"kubernetes.io/projected/d90f01be-5138-44bc-8330-0e8ee3914ba8-kube-api-access-tqszm\") pod \"keystone-operator-controller-manager-5c68d88c57-2k92j\" (UID: \"d90f01be-5138-44bc-8330-0e8ee3914ba8\") " pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.445457 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.448836 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qmzb\" (UniqueName: \"kubernetes.io/projected/6c3ed8b6-85b5-402b-994a-ca068cc5a357-kube-api-access-7qmzb\") pod \"infra-operator-controller-manager-64cbcd8bcf-b9q8b\" (UID: \"6c3ed8b6-85b5-402b-994a-ca068cc5a357\") " pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.472249 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.477572 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.481899 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-pqg5q" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.497836 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5qvn\" (UniqueName: \"kubernetes.io/projected/828ba013-e0fe-452c-a8ae-2dbb8e9436b4-kube-api-access-g5qvn\") pod \"nova-operator-controller-manager-8588b44bb6-2m4gd\" (UID: \"828ba013-e0fe-452c-a8ae-2dbb8e9436b4\") " pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.497888 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4d4c\" (UniqueName: \"kubernetes.io/projected/20a746aa-153e-4ad3-afb7-e5d771927b18-kube-api-access-s4d4c\") pod \"mariadb-operator-controller-manager-697bcb486c-xcdsm\" (UID: \"20a746aa-153e-4ad3-afb7-e5d771927b18\") " pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.497982 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fbsr\" (UniqueName: \"kubernetes.io/projected/8458ea94-f568-498e-9f67-f1a31cdb2fdf-kube-api-access-9fbsr\") pod \"octavia-operator-controller-manager-97dc668d8-scbz4\" (UID: \"8458ea94-f568-498e-9f67-f1a31cdb2fdf\") " pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.498015 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9zhn\" (UniqueName: \"kubernetes.io/projected/d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5-kube-api-access-b9zhn\") pod \"neutron-operator-controller-manager-fdd8575d6-rqzfb\" (UID: \"d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5\") " pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.498069 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm88s\" (UniqueName: \"kubernetes.io/projected/fae3f9e9-7308-454c-80e2-c836cfa04a44-kube-api-access-fm88s\") pod \"manila-operator-controller-manager-67c5b7495b-2sfch\" (UID: \"fae3f9e9-7308-454c-80e2-c836cfa04a44\") " pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.517792 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4d4c\" (UniqueName: \"kubernetes.io/projected/20a746aa-153e-4ad3-afb7-e5d771927b18-kube-api-access-s4d4c\") pod \"mariadb-operator-controller-manager-697bcb486c-xcdsm\" (UID: \"20a746aa-153e-4ad3-afb7-e5d771927b18\") " pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.537379 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5qvn\" (UniqueName: \"kubernetes.io/projected/828ba013-e0fe-452c-a8ae-2dbb8e9436b4-kube-api-access-g5qvn\") pod \"nova-operator-controller-manager-8588b44bb6-2m4gd\" (UID: \"828ba013-e0fe-452c-a8ae-2dbb8e9436b4\") " pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" Nov 11 13:54:29 crc kubenswrapper[4842]: 
I1111 13:54:29.538597 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fbsr\" (UniqueName: \"kubernetes.io/projected/8458ea94-f568-498e-9f67-f1a31cdb2fdf-kube-api-access-9fbsr\") pod \"octavia-operator-controller-manager-97dc668d8-scbz4\" (UID: \"8458ea94-f568-498e-9f67-f1a31cdb2fdf\") " pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.541450 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.544486 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm88s\" (UniqueName: \"kubernetes.io/projected/fae3f9e9-7308-454c-80e2-c836cfa04a44-kube-api-access-fm88s\") pod \"manila-operator-controller-manager-67c5b7495b-2sfch\" (UID: \"fae3f9e9-7308-454c-80e2-c836cfa04a44\") " pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.549224 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.551631 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.570918 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.572177 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.573185 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.577318 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-2h8mn" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.582440 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.601912 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.602655 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpzdd\" (UniqueName: \"kubernetes.io/projected/85789962-b64f-422a-a2b4-4f98a786be81-kube-api-access-hpzdd\") pod \"ovn-operator-controller-manager-6559d764b4-ntbtw\" (UID: \"85789962-b64f-422a-a2b4-4f98a786be81\") " pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.602746 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9zhn\" (UniqueName: \"kubernetes.io/projected/d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5-kube-api-access-b9zhn\") pod \"neutron-operator-controller-manager-fdd8575d6-rqzfb\" (UID: \"d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5\") " pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.611408 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.622666 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-sldlq" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.634062 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9zhn\" (UniqueName: \"kubernetes.io/projected/d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5-kube-api-access-b9zhn\") pod \"neutron-operator-controller-manager-fdd8575d6-rqzfb\" (UID: \"d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5\") " pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.649736 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.651332 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.655453 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-l7rqc" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.665235 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.669217 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.689936 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.705785 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr5wb\" (UniqueName: \"kubernetes.io/projected/0b0bd151-ad85-46db-8425-fe640a956d01-kube-api-access-fr5wb\") pod \"placement-operator-controller-manager-776bc4cb49-lh5x5\" (UID: \"0b0bd151-ad85-46db-8425-fe640a956d01\") " pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.706056 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncrb7\" (UniqueName: \"kubernetes.io/projected/79eedf2f-0af7-46fa-aa0e-7d965ee918d3-kube-api-access-ncrb7\") pod \"swift-operator-controller-manager-57cf4f487c-8hwbs\" (UID: \"79eedf2f-0af7-46fa-aa0e-7d965ee918d3\") " pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.706179 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzdd\" (UniqueName: \"kubernetes.io/projected/85789962-b64f-422a-a2b4-4f98a786be81-kube-api-access-hpzdd\") pod \"ovn-operator-controller-manager-6559d764b4-ntbtw\" (UID: \"85789962-b64f-422a-a2b4-4f98a786be81\") " pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.708158 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.723775 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.725702 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.725883 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.735140 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-c4rqc" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.738274 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpzdd\" (UniqueName: \"kubernetes.io/projected/85789962-b64f-422a-a2b4-4f98a786be81-kube-api-access-hpzdd\") pod \"ovn-operator-controller-manager-6559d764b4-ntbtw\" (UID: \"85789962-b64f-422a-a2b4-4f98a786be81\") " pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.743412 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.755846 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.756987 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.757069 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.761926 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-2f84v" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.765974 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.777590 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.798552 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.806399 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.806509 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.810238 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-4phk8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.819184 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrb7\" (UniqueName: \"kubernetes.io/projected/79eedf2f-0af7-46fa-aa0e-7d965ee918d3-kube-api-access-ncrb7\") pod \"swift-operator-controller-manager-57cf4f487c-8hwbs\" (UID: \"79eedf2f-0af7-46fa-aa0e-7d965ee918d3\") " pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.819308 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krqr2\" (UniqueName: \"kubernetes.io/projected/f179c06b-83ea-4ece-b789-7bb5d75e05d5-kube-api-access-krqr2\") pod \"telemetry-operator-controller-manager-5cc784f744-5p2r8\" (UID: \"f179c06b-83ea-4ece-b789-7bb5d75e05d5\") " pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.819426 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-cert\") pod \"openstack-baremetal-operator-controller-manager-54948dd897l2jn8\" (UID: \"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.819473 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqgvl\" (UniqueName: \"kubernetes.io/projected/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-kube-api-access-bqgvl\") pod \"openstack-baremetal-operator-controller-manager-54948dd897l2jn8\" (UID: \"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.819506 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fr5wb\" (UniqueName: \"kubernetes.io/projected/0b0bd151-ad85-46db-8425-fe640a956d01-kube-api-access-fr5wb\") pod \"placement-operator-controller-manager-776bc4cb49-lh5x5\" (UID: \"0b0bd151-ad85-46db-8425-fe640a956d01\") " pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.843767 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.845371 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.846118 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.847324 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-rt49c" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.849718 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.851875 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr5wb\" (UniqueName: \"kubernetes.io/projected/0b0bd151-ad85-46db-8425-fe640a956d01-kube-api-access-fr5wb\") pod \"placement-operator-controller-manager-776bc4cb49-lh5x5\" (UID: \"0b0bd151-ad85-46db-8425-fe640a956d01\") " pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.858071 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.868844 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncrb7\" (UniqueName: \"kubernetes.io/projected/79eedf2f-0af7-46fa-aa0e-7d965ee918d3-kube-api-access-ncrb7\") pod \"swift-operator-controller-manager-57cf4f487c-8hwbs\" (UID: \"79eedf2f-0af7-46fa-aa0e-7d965ee918d3\") " pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.889564 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.890453 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.908206 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-9tnft" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.919165 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh"] Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922117 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c3ed8b6-85b5-402b-994a-ca068cc5a357-cert\") pod \"infra-operator-controller-manager-64cbcd8bcf-b9q8b\" (UID: \"6c3ed8b6-85b5-402b-994a-ca068cc5a357\") " pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922164 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-cert\") pod \"openstack-baremetal-operator-controller-manager-54948dd897l2jn8\" (UID: \"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922271 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqgvl\" (UniqueName: \"kubernetes.io/projected/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-kube-api-access-bqgvl\") pod \"openstack-baremetal-operator-controller-manager-54948dd897l2jn8\" (UID: \"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922397 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jm7h\" (UniqueName: \"kubernetes.io/projected/74d23e18-019f-4f09-9011-8d495ff3c70b-kube-api-access-2jm7h\") pod \"openstack-operator-controller-manager-56b55c68d5-v2ffq\" (UID: \"74d23e18-019f-4f09-9011-8d495ff3c70b\") " pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922422 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mvm2\" (UniqueName: \"kubernetes.io/projected/cfcfc6be-d566-4ba4-87e9-6157d249adc0-kube-api-access-2mvm2\") pod \"watcher-operator-controller-manager-6c495746fb-mgjxt\" (UID: \"cfcfc6be-d566-4ba4-87e9-6157d249adc0\") " pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922478 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skftz\" (UniqueName: \"kubernetes.io/projected/0f38813f-c55c-43d3-94bd-3ee9152e3db3-kube-api-access-skftz\") pod \"test-operator-controller-manager-66ff8cb84f-nqlnk\" (UID: \"0f38813f-c55c-43d3-94bd-3ee9152e3db3\") " pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922546 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krqr2\" (UniqueName: 
\"kubernetes.io/projected/f179c06b-83ea-4ece-b789-7bb5d75e05d5-kube-api-access-krqr2\") pod \"telemetry-operator-controller-manager-5cc784f744-5p2r8\" (UID: \"f179c06b-83ea-4ece-b789-7bb5d75e05d5\") " pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.922566 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/74d23e18-019f-4f09-9011-8d495ff3c70b-cert\") pod \"openstack-operator-controller-manager-56b55c68d5-v2ffq\" (UID: \"74d23e18-019f-4f09-9011-8d495ff3c70b\") " pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:29 crc kubenswrapper[4842]: E1111 13:54:29.922729 4842 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 11 13:54:29 crc kubenswrapper[4842]: E1111 13:54:29.922807 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-cert podName:3f8fa56e-98d6-4af9-9ea6-13917e0c5aee nodeName:}" failed. No retries permitted until 2025-11-11 13:54:30.422790755 +0000 UTC m=+1481.083080374 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-cert") pod "openstack-baremetal-operator-controller-manager-54948dd897l2jn8" (UID: "3f8fa56e-98d6-4af9-9ea6-13917e0c5aee") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.923965 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.928878 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c3ed8b6-85b5-402b-994a-ca068cc5a357-cert\") pod \"infra-operator-controller-manager-64cbcd8bcf-b9q8b\" (UID: \"6c3ed8b6-85b5-402b-994a-ca068cc5a357\") " pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.956811 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.966692 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krqr2\" (UniqueName: \"kubernetes.io/projected/f179c06b-83ea-4ece-b789-7bb5d75e05d5-kube-api-access-krqr2\") pod \"telemetry-operator-controller-manager-5cc784f744-5p2r8\" (UID: \"f179c06b-83ea-4ece-b789-7bb5d75e05d5\") " pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.971320 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqgvl\" (UniqueName: \"kubernetes.io/projected/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-kube-api-access-bqgvl\") pod \"openstack-baremetal-operator-controller-manager-54948dd897l2jn8\" (UID: \"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:29 crc kubenswrapper[4842]: I1111 13:54:29.980540 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.024026 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/74d23e18-019f-4f09-9011-8d495ff3c70b-cert\") pod \"openstack-operator-controller-manager-56b55c68d5-v2ffq\" (UID: \"74d23e18-019f-4f09-9011-8d495ff3c70b\") " pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.024120 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v6d8\" (UniqueName: \"kubernetes.io/projected/dcb386ac-da43-4629-a57f-1d272c31bd46-kube-api-access-2v6d8\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-phmkh\" (UID: \"dcb386ac-da43-4629-a57f-1d272c31bd46\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.024191 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jm7h\" (UniqueName: \"kubernetes.io/projected/74d23e18-019f-4f09-9011-8d495ff3c70b-kube-api-access-2jm7h\") pod \"openstack-operator-controller-manager-56b55c68d5-v2ffq\" (UID: \"74d23e18-019f-4f09-9011-8d495ff3c70b\") " pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.024223 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mvm2\" (UniqueName: \"kubernetes.io/projected/cfcfc6be-d566-4ba4-87e9-6157d249adc0-kube-api-access-2mvm2\") pod \"watcher-operator-controller-manager-6c495746fb-mgjxt\" (UID: \"cfcfc6be-d566-4ba4-87e9-6157d249adc0\") " pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.024257 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skftz\" (UniqueName: \"kubernetes.io/projected/0f38813f-c55c-43d3-94bd-3ee9152e3db3-kube-api-access-skftz\") pod \"test-operator-controller-manager-66ff8cb84f-nqlnk\" (UID: \"0f38813f-c55c-43d3-94bd-3ee9152e3db3\") " pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" Nov 11 13:54:30 crc 
kubenswrapper[4842]: E1111 13:54:30.024294 4842 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 11 13:54:30 crc kubenswrapper[4842]: E1111 13:54:30.024632 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74d23e18-019f-4f09-9011-8d495ff3c70b-cert podName:74d23e18-019f-4f09-9011-8d495ff3c70b nodeName:}" failed. No retries permitted until 2025-11-11 13:54:30.524610659 +0000 UTC m=+1481.184900528 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/74d23e18-019f-4f09-9011-8d495ff3c70b-cert") pod "openstack-operator-controller-manager-56b55c68d5-v2ffq" (UID: "74d23e18-019f-4f09-9011-8d495ff3c70b") : secret "webhook-server-cert" not found Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.059407 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skftz\" (UniqueName: \"kubernetes.io/projected/0f38813f-c55c-43d3-94bd-3ee9152e3db3-kube-api-access-skftz\") pod \"test-operator-controller-manager-66ff8cb84f-nqlnk\" (UID: \"0f38813f-c55c-43d3-94bd-3ee9152e3db3\") " pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.065071 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jm7h\" (UniqueName: \"kubernetes.io/projected/74d23e18-019f-4f09-9011-8d495ff3c70b-kube-api-access-2jm7h\") pod \"openstack-operator-controller-manager-56b55c68d5-v2ffq\" (UID: \"74d23e18-019f-4f09-9011-8d495ff3c70b\") " pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.065376 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mvm2\" (UniqueName: \"kubernetes.io/projected/cfcfc6be-d566-4ba4-87e9-6157d249adc0-kube-api-access-2mvm2\") pod \"watcher-operator-controller-manager-6c495746fb-mgjxt\" (UID: \"cfcfc6be-d566-4ba4-87e9-6157d249adc0\") " pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.121969 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.129564 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v6d8\" (UniqueName: \"kubernetes.io/projected/dcb386ac-da43-4629-a57f-1d272c31bd46-kube-api-access-2v6d8\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-phmkh\" (UID: \"dcb386ac-da43-4629-a57f-1d272c31bd46\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.135234 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.154882 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v6d8\" (UniqueName: \"kubernetes.io/projected/dcb386ac-da43-4629-a57f-1d272c31bd46-kube-api-access-2v6d8\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-phmkh\" (UID: \"dcb386ac-da43-4629-a57f-1d272c31bd46\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.155391 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd"] Nov 11 13:54:30 crc kubenswrapper[4842]: W1111 13:54:30.204754 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb01ab86b_b1f6_4f5e_ba91_06f6bb652d4e.slice/crio-c49c1e4020e230fac8d43c78aad079aea2aac52589a12aded2f2f270964f5736 WatchSource:0}: Error finding container c49c1e4020e230fac8d43c78aad079aea2aac52589a12aded2f2f270964f5736: Status 404 returned error can't find the container with id c49c1e4020e230fac8d43c78aad079aea2aac52589a12aded2f2f270964f5736 Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.211284 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.230863 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" event={"ID":"b7462081-0162-4bd6-96fe-23a8c29df0db","Type":"ContainerStarted","Data":"03f33ac7b82feae834563b2addb4233cd6c1edc8e98d1dd52a186d925c54821d"} Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.238495 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.346068 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.435987 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-cert\") pod \"openstack-baremetal-operator-controller-manager-54948dd897l2jn8\" (UID: \"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.454600 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.460021 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3f8fa56e-98d6-4af9-9ea6-13917e0c5aee-cert\") pod \"openstack-baremetal-operator-controller-manager-54948dd897l2jn8\" (UID: \"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.537888 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/74d23e18-019f-4f09-9011-8d495ff3c70b-cert\") pod \"openstack-operator-controller-manager-56b55c68d5-v2ffq\" (UID: \"74d23e18-019f-4f09-9011-8d495ff3c70b\") " pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.544696 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/74d23e18-019f-4f09-9011-8d495ff3c70b-cert\") pod \"openstack-operator-controller-manager-56b55c68d5-v2ffq\" (UID: \"74d23e18-019f-4f09-9011-8d495ff3c70b\") " pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.582807 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.653052 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.759364 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.930632 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.939640 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.947764 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.955353 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.972325 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8"] Nov 11 13:54:30 crc kubenswrapper[4842]: W1111 13:54:30.975647 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90f01be_5138_44bc_8330_0e8ee3914ba8.slice/crio-eee7837ff09913baa4a2fc929c54283c40502810ed59aaa93786de4512b28bfa WatchSource:0}: Error finding container eee7837ff09913baa4a2fc929c54283c40502810ed59aaa93786de4512b28bfa: Status 404 returned error can't find the container with id eee7837ff09913baa4a2fc929c54283c40502810ed59aaa93786de4512b28bfa Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.980489 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd"] Nov 11 13:54:30 crc kubenswrapper[4842]: I1111 13:54:30.988561 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.243823 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" event={"ID":"20a746aa-153e-4ad3-afb7-e5d771927b18","Type":"ContainerStarted","Data":"b1b5cf7b0367c3e089273b504f3b48ab3112cbab583f8599702460a77e2d2f33"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.245571 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" event={"ID":"d200f269-63a1-4cee-820f-1b42538f1fb9","Type":"ContainerStarted","Data":"cbafca98a436a0059dfd607c09e7a2d17d588d588cb8f96dcb51027abe036b55"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.250597 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" event={"ID":"d90f01be-5138-44bc-8330-0e8ee3914ba8","Type":"ContainerStarted","Data":"eee7837ff09913baa4a2fc929c54283c40502810ed59aaa93786de4512b28bfa"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.251967 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" event={"ID":"7c7223fc-d7fe-416d-8c4f-872f399ad3f3","Type":"ContainerStarted","Data":"79841eec494420b2ff76987f6fb5c95b588041715e7fe2a377f40ce906580734"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.256447 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" event={"ID":"c0dc7222-a511-4010-b7ad-f1d4716958f8","Type":"ContainerStarted","Data":"ff30d4cd460b7e7056a91e26220f32faf9837df8b2f4e882b3a1378d1c8660d8"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.263392 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" event={"ID":"b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e","Type":"ContainerStarted","Data":"c49c1e4020e230fac8d43c78aad079aea2aac52589a12aded2f2f270964f5736"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.264528 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.266068 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" event={"ID":"828ba013-e0fe-452c-a8ae-2dbb8e9436b4","Type":"ContainerStarted","Data":"d0d4695f476c8dd4157c2294e49715ed93c62643c567e94602d202b2a2d070df"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.267270 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" event={"ID":"8976057b-f908-4295-93a2-0bd3bb1441da","Type":"ContainerStarted","Data":"824395f0c90682e5d776cf9b9e1f5cbd9776ab450573bc954ce77eda84091791"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.271494 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" event={"ID":"00cc5552-7130-40ca-ab43-b6525d3199f4","Type":"ContainerStarted","Data":"9fa8338eba83a3b2568d4dab2d52b968e5e7d21bbdbdbe427a888bf9ef408531"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.275054 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.284611 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.300007 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" event={"ID":"fae3f9e9-7308-454c-80e2-c836cfa04a44","Type":"ContainerStarted","Data":"0a19320fd30f14bd19d9b009a9fec6dfe182d0576954bee2579ec66f72e3aa98"} Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.303234 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.317348 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.323909 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs"] Nov 11 13:54:31 crc kubenswrapper[4842]: W1111 13:54:31.327639 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8458ea94_f568_498e_9f67_f1a31cdb2fdf.slice/crio-4aed023045cb1d8ed4280039fb4880b9a8711caeea632611a4ee04b054448e46 WatchSource:0}: Error finding container 
4aed023045cb1d8ed4280039fb4880b9a8711caeea632611a4ee04b054448e46: Status 404 returned error can't find the container with id 4aed023045cb1d8ed4280039fb4880b9a8711caeea632611a4ee04b054448e46 Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.342203 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.347141 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.351154 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.357484 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.362163 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8"] Nov 11 13:54:31 crc kubenswrapper[4842]: I1111 13:54:31.369508 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8"] Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.372870 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:8ed782cb1a952aa31950a9153a51c25fc30b130a928b58e0870e1071d5e5efbe,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ncrb7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-57cf4f487c-8hwbs_openstack-operators(79eedf2f-0af7-46fa-aa0e-7d965ee918d3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.414388 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:9ed3e90ad9a4613c6c9c924487006183539b153fc00136ed37b3732f9f6ca2f2,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hpzdd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-6559d764b4-ntbtw_openstack-operators(85789962-b64f-422a-a2b4-4f98a786be81): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.414747 4842 
kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:6f14cbcb71da0fc6f849a4ca8b14a001c6fc62a1b9e78222f0d23f92fd51f5a0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,
Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE
_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-o
ctavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.
io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bqgvl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-54948dd897l2jn8_openstack-operators(3f8fa56e-98d6-4af9-9ea6-13917e0c5aee): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.421736 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.132:5001/openstack-k8s-operators/watcher-operator:b07a7f6b78c4a9ea3cf76adc09f75764e5704245,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2mvm2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6c495746fb-mgjxt_openstack-operators(cfcfc6be-d566-4ba4-87e9-6157d249adc0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.421838 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2v6d8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-phmkh_openstack-operators(dcb386ac-da43-4629-a57f-1d272c31bd46): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.422960 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" podUID="dcb386ac-da43-4629-a57f-1d272c31bd46" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.514252 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7c7268354f4ce92a0112580323172c603f39eb8339098f40cda9a8e58b4a98e4,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-krqr2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-5cc784f744-5p2r8_openstack-operators(f179c06b-83ea-4ece-b789-7bb5d75e05d5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.719040 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" podUID="79eedf2f-0af7-46fa-aa0e-7d965ee918d3" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.731287 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" podUID="cfcfc6be-d566-4ba4-87e9-6157d249adc0" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.883144 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" podUID="85789962-b64f-422a-a2b4-4f98a786be81" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.939910 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" podUID="3f8fa56e-98d6-4af9-9ea6-13917e0c5aee" Nov 11 13:54:31 crc kubenswrapper[4842]: E1111 13:54:31.985659 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" podUID="f179c06b-83ea-4ece-b789-7bb5d75e05d5" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.334829 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" event={"ID":"8458ea94-f568-498e-9f67-f1a31cdb2fdf","Type":"ContainerStarted","Data":"4aed023045cb1d8ed4280039fb4880b9a8711caeea632611a4ee04b054448e46"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.347857 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" event={"ID":"dcb386ac-da43-4629-a57f-1d272c31bd46","Type":"ContainerStarted","Data":"fb6283c148641076bcaff487016ae3b4be1c9364c9758f6bb2b26da2225e2482"} Nov 11 13:54:32 crc kubenswrapper[4842]: E1111 13:54:32.385234 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" podUID="dcb386ac-da43-4629-a57f-1d272c31bd46" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.430343 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" event={"ID":"f179c06b-83ea-4ece-b789-7bb5d75e05d5","Type":"ContainerStarted","Data":"bbf8f8d7171cd61d4cde3cbaa044df5ddb317d38a778de9ef1ae0caea9ad9bd9"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.430388 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" event={"ID":"f179c06b-83ea-4ece-b789-7bb5d75e05d5","Type":"ContainerStarted","Data":"031d2b94f5a788bcafc96e26d66467e34c228170d4147401c73413bbb262d09e"} Nov 11 13:54:32 crc kubenswrapper[4842]: E1111 13:54:32.455800 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7c7268354f4ce92a0112580323172c603f39eb8339098f40cda9a8e58b4a98e4\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" podUID="f179c06b-83ea-4ece-b789-7bb5d75e05d5" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.506264 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" 
event={"ID":"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee","Type":"ContainerStarted","Data":"0da19742d07871ebb75efcd9de9b453643b47bb9b31d5ab7e366c294d55e25cd"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.506311 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" event={"ID":"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee","Type":"ContainerStarted","Data":"b638167050ae20a128c7669cb8dda693a6e978f969e120415124087958ab0bee"} Nov 11 13:54:32 crc kubenswrapper[4842]: E1111 13:54:32.521360 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:6f14cbcb71da0fc6f849a4ca8b14a001c6fc62a1b9e78222f0d23f92fd51f5a0\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" podUID="3f8fa56e-98d6-4af9-9ea6-13917e0c5aee" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.523252 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" event={"ID":"74d23e18-019f-4f09-9011-8d495ff3c70b","Type":"ContainerStarted","Data":"7bd3fe86b72f6985be5e842d9f62f846060e3e70ba3b912c6a6033a5fc114567"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.523288 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" event={"ID":"74d23e18-019f-4f09-9011-8d495ff3c70b","Type":"ContainerStarted","Data":"9f4f4151b81f0f0aa3ac546c58f36ad0f021dd2f70dd386e8210fffed919d11e"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.523298 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" event={"ID":"74d23e18-019f-4f09-9011-8d495ff3c70b","Type":"ContainerStarted","Data":"ad3f0e00e8e0de288c262fa4e06f84c7454d22b06cf1f099fb875adc2e814f00"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.523941 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.546352 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" event={"ID":"d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5","Type":"ContainerStarted","Data":"2d34505fc15b375ba4c4007afaf39860b5771f5dbdfd0dc5ac095bbcb0ff0ec5"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.575288 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" event={"ID":"0b0bd151-ad85-46db-8425-fe640a956d01","Type":"ContainerStarted","Data":"42149b03cee1494cc55df83956a486c4ba3968720e94360f33117f820f1f2fb4"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.606763 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" event={"ID":"cfcfc6be-d566-4ba4-87e9-6157d249adc0","Type":"ContainerStarted","Data":"a7066b4c1541bdc278e7bfba443580184cbf3aecfa41e6c36727265ae13b5a24"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.606818 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" 
event={"ID":"cfcfc6be-d566-4ba4-87e9-6157d249adc0","Type":"ContainerStarted","Data":"f8ccfaa93060b6a940cfbf0b5506cba28c9bc08951f71132b008fb100982dcf3"} Nov 11 13:54:32 crc kubenswrapper[4842]: E1111 13:54:32.633913 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/openstack-k8s-operators/watcher-operator:b07a7f6b78c4a9ea3cf76adc09f75764e5704245\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" podUID="cfcfc6be-d566-4ba4-87e9-6157d249adc0" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.634109 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" event={"ID":"6c3ed8b6-85b5-402b-994a-ca068cc5a357","Type":"ContainerStarted","Data":"54c3995ba7259bde91438d5eb6d9d68be5aa60795c0d21e4fa38e7fc3adc1f95"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.647498 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" event={"ID":"0f38813f-c55c-43d3-94bd-3ee9152e3db3","Type":"ContainerStarted","Data":"f7406b9bd70a4803d525b2cb2e652937ba8c5e9d2a3c7aed324decc84a67c78e"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.648369 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" podStartSLOduration=3.64835247 podStartE2EDuration="3.64835247s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:54:32.64772595 +0000 UTC m=+1483.308015569" watchObservedRunningTime="2025-11-11 13:54:32.64835247 +0000 UTC m=+1483.308642089" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.663349 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" event={"ID":"85789962-b64f-422a-a2b4-4f98a786be81","Type":"ContainerStarted","Data":"ea9d49dc9a77526c3ec5968cbfc0670bd8db34eda25e3e0ba7ed69043e51cd60"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.663399 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" event={"ID":"85789962-b64f-422a-a2b4-4f98a786be81","Type":"ContainerStarted","Data":"d8c55f4a4a28fad8381ff33858f543974b39e0bd485a921dd761af39d4df4b01"} Nov 11 13:54:32 crc kubenswrapper[4842]: E1111 13:54:32.668288 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:9ed3e90ad9a4613c6c9c924487006183539b153fc00136ed37b3732f9f6ca2f2\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" podUID="85789962-b64f-422a-a2b4-4f98a786be81" Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.684823 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" event={"ID":"79eedf2f-0af7-46fa-aa0e-7d965ee918d3","Type":"ContainerStarted","Data":"5e4b7b27785d82ccf34391138faae52dbdd98d3038466db8be798babb3a8ad0b"} Nov 11 13:54:32 crc kubenswrapper[4842]: I1111 13:54:32.684885 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" event={"ID":"79eedf2f-0af7-46fa-aa0e-7d965ee918d3","Type":"ContainerStarted","Data":"ce8f89dcf77ef1a715140cf45c215fd1a4464cb9a08810d4cb7303ac7a19c185"} Nov 11 13:54:32 crc kubenswrapper[4842]: E1111 13:54:32.689239 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:8ed782cb1a952aa31950a9153a51c25fc30b130a928b58e0870e1071d5e5efbe\\\"\"" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" podUID="79eedf2f-0af7-46fa-aa0e-7d965ee918d3" Nov 11 13:54:33 crc kubenswrapper[4842]: E1111 13:54:33.693842 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:6f14cbcb71da0fc6f849a4ca8b14a001c6fc62a1b9e78222f0d23f92fd51f5a0\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" podUID="3f8fa56e-98d6-4af9-9ea6-13917e0c5aee" Nov 11 13:54:33 crc kubenswrapper[4842]: E1111 13:54:33.694619 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7c7268354f4ce92a0112580323172c603f39eb8339098f40cda9a8e58b4a98e4\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" podUID="f179c06b-83ea-4ece-b789-7bb5d75e05d5" Nov 11 13:54:33 crc kubenswrapper[4842]: E1111 13:54:33.694692 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:8ed782cb1a952aa31950a9153a51c25fc30b130a928b58e0870e1071d5e5efbe\\\"\"" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" podUID="79eedf2f-0af7-46fa-aa0e-7d965ee918d3" Nov 11 13:54:33 crc kubenswrapper[4842]: E1111 13:54:33.694748 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" podUID="dcb386ac-da43-4629-a57f-1d272c31bd46" Nov 11 13:54:33 crc kubenswrapper[4842]: E1111 13:54:33.694794 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/openstack-k8s-operators/watcher-operator:b07a7f6b78c4a9ea3cf76adc09f75764e5704245\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" podUID="cfcfc6be-d566-4ba4-87e9-6157d249adc0" Nov 11 13:54:33 crc kubenswrapper[4842]: E1111 13:54:33.697991 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:9ed3e90ad9a4613c6c9c924487006183539b153fc00136ed37b3732f9f6ca2f2\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" podUID="85789962-b64f-422a-a2b4-4f98a786be81" Nov 11 
13:54:35 crc kubenswrapper[4842]: I1111 13:54:35.043535 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:35 crc kubenswrapper[4842]: I1111 13:54:35.043805 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:35 crc kubenswrapper[4842]: I1111 13:54:35.081305 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:35 crc kubenswrapper[4842]: I1111 13:54:35.748528 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:35 crc kubenswrapper[4842]: I1111 13:54:35.796512 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nbb5w"] Nov 11 13:54:37 crc kubenswrapper[4842]: I1111 13:54:37.717329 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nbb5w" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="registry-server" containerID="cri-o://15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04" gracePeriod=2 Nov 11 13:54:38 crc kubenswrapper[4842]: I1111 13:54:38.730858 4842 generic.go:334] "Generic (PLEG): container finished" podID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerID="15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04" exitCode=0 Nov 11 13:54:38 crc kubenswrapper[4842]: I1111 13:54:38.730928 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nbb5w" event={"ID":"a7e10df1-f131-4fba-99e0-117a30f01ba0","Type":"ContainerDied","Data":"15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04"} Nov 11 13:54:40 crc kubenswrapper[4842]: I1111 13:54:40.658556 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-56b55c68d5-v2ffq" Nov 11 13:54:44 crc kubenswrapper[4842]: E1111 13:54:44.203742 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:f4ff739c066e6ba873376bd1cdb533b68b8ffa38f0e399b0d315476c05c6b322" Nov 11 13:54:44 crc kubenswrapper[4842]: E1111 13:54:44.204190 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:f4ff739c066e6ba873376bd1cdb533b68b8ffa38f0e399b0d315476c05c6b322,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fr5wb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-776bc4cb49-lh5x5_openstack-operators(0b0bd151-ad85-46db-8425-fe640a956d01): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:54:44 crc kubenswrapper[4842]: E1111 13:54:44.614729 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:c8a28d99a2de585df772e1d495c8dbc7e9b25a71f18c677b3f15aea6b275ca92" Nov 11 13:54:44 crc kubenswrapper[4842]: E1111 13:54:44.614896 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:c8a28d99a2de585df772e1d495c8dbc7e9b25a71f18c677b3f15aea6b275ca92,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fm88s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-67c5b7495b-2sfch_openstack-operators(fae3f9e9-7308-454c-80e2-c836cfa04a44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.045213 4842 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04 is running failed: container process not found" containerID="15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04" cmd=["grpc_health_probe","-addr=:50051"] Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.046062 4842 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04 is running failed: container process not found" containerID="15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04" cmd=["grpc_health_probe","-addr=:50051"] Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.046406 4842 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04 is running failed: container process not found" containerID="15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04" cmd=["grpc_health_probe","-addr=:50051"] Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.046446 4842 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-nbb5w" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="registry-server" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.085630 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:810bea5c7caaace8aa8388545d110e7c6e412c6f7ba0e74d027970ec394690d0" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.085797 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:810bea5c7caaace8aa8388545d110e7c6e412c6f7ba0e74d027970ec394690d0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7qmzb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-64cbcd8bcf-b9q8b_openstack-operators(6c3ed8b6-85b5-402b-994a-ca068cc5a357): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.646964 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:c5f2ac270a982a599e665960ae2622703421f3b5bd2952dc0a82735c474eb9a8" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.647197 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:c5f2ac270a982a599e665960ae2622703421f3b5bd2952dc0a82735c474eb9a8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v2dbc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-8dffd86b7-rldzd_openstack-operators(b7462081-0162-4bd6-96fe-23a8c29df0db): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.706411 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.787129 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nbb5w" event={"ID":"a7e10df1-f131-4fba-99e0-117a30f01ba0","Type":"ContainerDied","Data":"6c0de18bb5cbcc08898232186d626f5285810842bbeb2d943c0b27ec7685009d"} Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.787200 4842 scope.go:117] "RemoveContainer" containerID="15a25057a883592095b210304928d4d151a69910f9be15fcebea5f0e8f481b04" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.787314 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nbb5w" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.809134 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-catalog-content\") pod \"a7e10df1-f131-4fba-99e0-117a30f01ba0\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.809306 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-utilities\") pod \"a7e10df1-f131-4fba-99e0-117a30f01ba0\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.809365 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcx9m\" (UniqueName: \"kubernetes.io/projected/a7e10df1-f131-4fba-99e0-117a30f01ba0-kube-api-access-gcx9m\") pod \"a7e10df1-f131-4fba-99e0-117a30f01ba0\" (UID: \"a7e10df1-f131-4fba-99e0-117a30f01ba0\") " Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.812321 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-utilities" (OuterVolumeSpecName: "utilities") pod "a7e10df1-f131-4fba-99e0-117a30f01ba0" (UID: "a7e10df1-f131-4fba-99e0-117a30f01ba0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.820452 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7e10df1-f131-4fba-99e0-117a30f01ba0-kube-api-access-gcx9m" (OuterVolumeSpecName: "kube-api-access-gcx9m") pod "a7e10df1-f131-4fba-99e0-117a30f01ba0" (UID: "a7e10df1-f131-4fba-99e0-117a30f01ba0"). InnerVolumeSpecName "kube-api-access-gcx9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.877699 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" podUID="b7462081-0162-4bd6-96fe-23a8c29df0db" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.879713 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7e10df1-f131-4fba-99e0-117a30f01ba0" (UID: "a7e10df1-f131-4fba-99e0-117a30f01ba0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.892367 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" podUID="6c3ed8b6-85b5-402b-994a-ca068cc5a357" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.916279 4842 scope.go:117] "RemoveContainer" containerID="8c2ea3ea98fdcabf10cadc8acca4ebfa975582936fa3d551174024035c05dd78" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.917146 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.917168 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7e10df1-f131-4fba-99e0-117a30f01ba0-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:54:45 crc kubenswrapper[4842]: I1111 13:54:45.917181 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcx9m\" (UniqueName: \"kubernetes.io/projected/a7e10df1-f131-4fba-99e0-117a30f01ba0-kube-api-access-gcx9m\") on node \"crc\" DevicePath \"\"" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.933619 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" podUID="0b0bd151-ad85-46db-8425-fe640a956d01" Nov 11 13:54:45 crc kubenswrapper[4842]: E1111 13:54:45.984003 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" podUID="fae3f9e9-7308-454c-80e2-c836cfa04a44" Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.080913 4842 scope.go:117] "RemoveContainer" containerID="9c1748eeb54451d9dcce355eaeeeb5f252b59baa3ef4176317306ff20904f36d" Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.539367 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nbb5w"] Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.548394 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nbb5w"] Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.796920 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" event={"ID":"6c3ed8b6-85b5-402b-994a-ca068cc5a357","Type":"ContainerStarted","Data":"b8708b78dc538e9064ce9d1c992fedc855454b90f7e86386d961721e447b7167"} Nov 11 13:54:46 crc kubenswrapper[4842]: E1111 13:54:46.798582 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:810bea5c7caaace8aa8388545d110e7c6e412c6f7ba0e74d027970ec394690d0\\\"\"" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" podUID="6c3ed8b6-85b5-402b-994a-ca068cc5a357" Nov 11 
13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.800672 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" event={"ID":"fae3f9e9-7308-454c-80e2-c836cfa04a44","Type":"ContainerStarted","Data":"9390f6e9e6c2ac5a289a5a69989aee9016767f4a8f8ef45c3112399060e854a6"} Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.802733 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" event={"ID":"b7462081-0162-4bd6-96fe-23a8c29df0db","Type":"ContainerStarted","Data":"88edf85597d1663822f2c1a5638346437a1b1e69b31e1134609493c758aed270"} Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.805696 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" event={"ID":"d200f269-63a1-4cee-820f-1b42538f1fb9","Type":"ContainerStarted","Data":"b1f1842071e5e64e9c7cc8c07964a04507ec25e663f35bee6d84748c6c2961b4"} Nov 11 13:54:46 crc kubenswrapper[4842]: E1111 13:54:46.807722 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:c5f2ac270a982a599e665960ae2622703421f3b5bd2952dc0a82735c474eb9a8\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" podUID="b7462081-0162-4bd6-96fe-23a8c29df0db" Nov 11 13:54:46 crc kubenswrapper[4842]: E1111 13:54:46.808044 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:c8a28d99a2de585df772e1d495c8dbc7e9b25a71f18c677b3f15aea6b275ca92\\\"\"" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" podUID="fae3f9e9-7308-454c-80e2-c836cfa04a44" Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.808982 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" event={"ID":"d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5","Type":"ContainerStarted","Data":"33674def68fcfb690c3ba69149ec7ac4c27c02ca686f3098ef93f66bc781fdf4"} Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.811649 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" event={"ID":"c0dc7222-a511-4010-b7ad-f1d4716958f8","Type":"ContainerStarted","Data":"028e8bffe41b96e3a8df84a6b86ce41fb3ff4e5ad1844a7e2f1b99d73e2cfb38"} Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.813965 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" event={"ID":"7c7223fc-d7fe-416d-8c4f-872f399ad3f3","Type":"ContainerStarted","Data":"f9ae55cee5a0ac06f9029ff90442fa4a3d1b7235d860af8af64a8e377c398bfc"} Nov 11 13:54:46 crc kubenswrapper[4842]: I1111 13:54:46.820756 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" event={"ID":"0b0bd151-ad85-46db-8425-fe640a956d01","Type":"ContainerStarted","Data":"6102bd77a51d35d2e5c2d2a2d99f7b026ab1724965372b8cd7bb5eb042e5c380"} Nov 11 13:54:46 crc kubenswrapper[4842]: E1111 13:54:46.822878 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:f4ff739c066e6ba873376bd1cdb533b68b8ffa38f0e399b0d315476c05c6b322\\\"\"" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" podUID="0b0bd151-ad85-46db-8425-fe640a956d01" Nov 11 13:54:47 crc kubenswrapper[4842]: E1111 13:54:47.828159 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:810bea5c7caaace8aa8388545d110e7c6e412c6f7ba0e74d027970ec394690d0\\\"\"" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" podUID="6c3ed8b6-85b5-402b-994a-ca068cc5a357" Nov 11 13:54:47 crc kubenswrapper[4842]: E1111 13:54:47.828491 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:c5f2ac270a982a599e665960ae2622703421f3b5bd2952dc0a82735c474eb9a8\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" podUID="b7462081-0162-4bd6-96fe-23a8c29df0db" Nov 11 13:54:47 crc kubenswrapper[4842]: E1111 13:54:47.828516 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:f4ff739c066e6ba873376bd1cdb533b68b8ffa38f0e399b0d315476c05c6b322\\\"\"" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" podUID="0b0bd151-ad85-46db-8425-fe640a956d01" Nov 11 13:54:47 crc kubenswrapper[4842]: E1111 13:54:47.828528 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:c8a28d99a2de585df772e1d495c8dbc7e9b25a71f18c677b3f15aea6b275ca92\\\"\"" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" podUID="fae3f9e9-7308-454c-80e2-c836cfa04a44" Nov 11 13:54:48 crc kubenswrapper[4842]: I1111 13:54:48.076951 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" path="/var/lib/kubelet/pods/a7e10df1-f131-4fba-99e0-117a30f01ba0/volumes" Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.861307 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" event={"ID":"828ba013-e0fe-452c-a8ae-2dbb8e9436b4","Type":"ContainerStarted","Data":"273916d5ffc61ecfaca3463e6281f044dc939b6f5460f260573fad8cf301e2f5"} Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.881305 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" event={"ID":"d90f01be-5138-44bc-8330-0e8ee3914ba8","Type":"ContainerStarted","Data":"60dc398a054ffac5029530f5fb5040b7bc4074bcb40318b02c5a07d40b62f840"} Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.894255 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" event={"ID":"8976057b-f908-4295-93a2-0bd3bb1441da","Type":"ContainerStarted","Data":"f6519a5a1195f022cb73dedc65ffaf4a3696a95e072ac20afa982af344f3cedc"} Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.923272 
4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" event={"ID":"8458ea94-f568-498e-9f67-f1a31cdb2fdf","Type":"ContainerStarted","Data":"9d544a2ceae0b0e06499765d48b554f9717b7137eaea6acb7fef903bf36063d1"} Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.942117 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" event={"ID":"00cc5552-7130-40ca-ab43-b6525d3199f4","Type":"ContainerStarted","Data":"f29b589ba6efd61aab544cc3ea0841b40adca97c94416d156aabe8724989ea1f"} Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.946928 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" event={"ID":"c0dc7222-a511-4010-b7ad-f1d4716958f8","Type":"ContainerStarted","Data":"fe249751335dfa3e71273d7883f95de85949f056f2fdcf93153c45a71f967839"} Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.947137 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.973139 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" event={"ID":"d200f269-63a1-4cee-820f-1b42538f1fb9","Type":"ContainerStarted","Data":"05900abaddb5c8e03a2ea1d0cbb7c010495956c7710c0d2c88e0bcb0da4fff63"} Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.973949 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" Nov 11 13:54:49 crc kubenswrapper[4842]: I1111 13:54:49.982253 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" podStartSLOduration=5.946499792 podStartE2EDuration="20.982226633s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:30.611532789 +0000 UTC m=+1481.271822408" lastFinishedPulling="2025-11-11 13:54:45.64725963 +0000 UTC m=+1496.307549249" observedRunningTime="2025-11-11 13:54:49.973705044 +0000 UTC m=+1500.633994683" watchObservedRunningTime="2025-11-11 13:54:49.982226633 +0000 UTC m=+1500.642516252" Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.005709 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" podStartSLOduration=6.350063872 podStartE2EDuration="21.005679644s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:30.991346479 +0000 UTC m=+1481.651636098" lastFinishedPulling="2025-11-11 13:54:45.646962251 +0000 UTC m=+1496.307251870" observedRunningTime="2025-11-11 13:54:49.998223148 +0000 UTC m=+1500.658512788" watchObservedRunningTime="2025-11-11 13:54:50.005679644 +0000 UTC m=+1500.665969263" Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.012862 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" event={"ID":"d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5","Type":"ContainerStarted","Data":"4ac17bfe587b5acb3bbdfa86dd117609ef003cfe7cdcf21d2751d2a2f23180ce"} Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.013050 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.022607 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" event={"ID":"7c7223fc-d7fe-416d-8c4f-872f399ad3f3","Type":"ContainerStarted","Data":"0d48343d6696ce181936f05c16cce4c6b5dac5c55b8ae7472341a28feb017489"} Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.023318 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.026244 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" event={"ID":"20a746aa-153e-4ad3-afb7-e5d771927b18","Type":"ContainerStarted","Data":"e8151ea91253d50498432d3dc3ffe96d060409fb226569e7ed59fb7e7c46f92a"} Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.029499 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" event={"ID":"0f38813f-c55c-43d3-94bd-3ee9152e3db3","Type":"ContainerStarted","Data":"f24e14a9b6957a41db8fcef848c95b856f11ba66fdd1b2782c9f6b4ded609ee4"} Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.040109 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" event={"ID":"b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e","Type":"ContainerStarted","Data":"b84327fc1c7f1d286469b31a0082eb2ab145de9d29ce5fdd64b8ce3feeaeaf26"} Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.045241 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" podStartSLOduration=6.659029236 podStartE2EDuration="21.045223452s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.293549049 +0000 UTC m=+1481.953838658" lastFinishedPulling="2025-11-11 13:54:45.679743255 +0000 UTC m=+1496.340032874" observedRunningTime="2025-11-11 13:54:50.039741629 +0000 UTC m=+1500.700031278" watchObservedRunningTime="2025-11-11 13:54:50.045223452 +0000 UTC m=+1500.705513071" Nov 11 13:54:50 crc kubenswrapper[4842]: I1111 13:54:50.086341 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" podStartSLOduration=5.655699031 podStartE2EDuration="21.086321349s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:30.216386274 +0000 UTC m=+1480.876675883" lastFinishedPulling="2025-11-11 13:54:45.647008582 +0000 UTC m=+1496.307298201" observedRunningTime="2025-11-11 13:54:50.085160923 +0000 UTC m=+1500.745450542" watchObservedRunningTime="2025-11-11 13:54:50.086321349 +0000 UTC m=+1500.746610988" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.050794 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" event={"ID":"00cc5552-7130-40ca-ab43-b6525d3199f4","Type":"ContainerStarted","Data":"ee57a0a6a0a85f7e1ccc77731d112da1b5d773a87d1c5b7e1cb5a4810f84ece6"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.051990 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.054218 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" event={"ID":"20a746aa-153e-4ad3-afb7-e5d771927b18","Type":"ContainerStarted","Data":"885d20e5a261fefbe292e132487c293b17e4ca9a43cafaff4b54c1772f36cb54"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.054849 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.057047 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" event={"ID":"0f38813f-c55c-43d3-94bd-3ee9152e3db3","Type":"ContainerStarted","Data":"c177d30c0926a06bba3447ccdac5a2d38f2909910198df5af96d22490c3aed22"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.057615 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.060023 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" event={"ID":"b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e","Type":"ContainerStarted","Data":"27cf1e1b340383ad3acef81fa2929318fb3c02217edcf2d2cfbc2a5b621fde24"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.060143 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.062642 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" event={"ID":"828ba013-e0fe-452c-a8ae-2dbb8e9436b4","Type":"ContainerStarted","Data":"8d0b730980ae5875f3c4430ca308dfc38e41fce14a3f8e34dee768d4e0ff3628"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.062830 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.065473 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" event={"ID":"d90f01be-5138-44bc-8330-0e8ee3914ba8","Type":"ContainerStarted","Data":"a419db9d72d52f223ddabcd92341630aa5a7721f7098c169f1df37f4bb9bfeda"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.065604 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.067667 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" event={"ID":"8976057b-f908-4295-93a2-0bd3bb1441da","Type":"ContainerStarted","Data":"0191107c14cf5921a8427a6c45a130f0962003f17ab349148f3e27509101986f"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.067978 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.070090 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" event={"ID":"8458ea94-f568-498e-9f67-f1a31cdb2fdf","Type":"ContainerStarted","Data":"ab7cd16941feefba992ceaf0058e7e75ed6b407090a0132a1fbaa9d2862ddd4a"} Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.072992 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7d445c6d8b-bqk7b" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.073157 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-fdd8575d6-rqzfb" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.073558 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-774b65955b-mtvmd" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.073711 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-67455b77fb-8g2hw" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.086409 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" podStartSLOduration=7.430681817 podStartE2EDuration="22.086383341s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:30.991309518 +0000 UTC m=+1481.651599137" lastFinishedPulling="2025-11-11 13:54:45.647011042 +0000 UTC m=+1496.307300661" observedRunningTime="2025-11-11 13:54:51.077643285 +0000 UTC m=+1501.737932904" watchObservedRunningTime="2025-11-11 13:54:51.086383341 +0000 UTC m=+1501.746672970" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.131672 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" podStartSLOduration=7.449442229 podStartE2EDuration="22.13164983s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.008224292 +0000 UTC m=+1481.668513911" lastFinishedPulling="2025-11-11 13:54:45.690431893 +0000 UTC m=+1496.350721512" observedRunningTime="2025-11-11 13:54:51.126882469 +0000 UTC m=+1501.787172088" watchObservedRunningTime="2025-11-11 13:54:51.13164983 +0000 UTC m=+1501.791939459" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.147173 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" podStartSLOduration=7.7952101240000005 podStartE2EDuration="22.147149439s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.370279941 +0000 UTC m=+1482.030569560" lastFinishedPulling="2025-11-11 13:54:45.722219256 +0000 UTC m=+1496.382508875" observedRunningTime="2025-11-11 13:54:51.144156084 +0000 UTC m=+1501.804445723" watchObservedRunningTime="2025-11-11 13:54:51.147149439 +0000 UTC m=+1501.807439068" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.239082 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" podStartSLOduration=7.929057039 podStartE2EDuration="22.239063181s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.37150302 +0000 UTC m=+1482.031792639" lastFinishedPulling="2025-11-11 13:54:45.681509162 +0000 UTC 
m=+1496.341798781" observedRunningTime="2025-11-11 13:54:51.230856082 +0000 UTC m=+1501.891145701" watchObservedRunningTime="2025-11-11 13:54:51.239063181 +0000 UTC m=+1501.899352800" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.261522 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" podStartSLOduration=6.777168205 podStartE2EDuration="22.261501179s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:30.218069037 +0000 UTC m=+1480.878358656" lastFinishedPulling="2025-11-11 13:54:45.702402001 +0000 UTC m=+1496.362691630" observedRunningTime="2025-11-11 13:54:51.252527926 +0000 UTC m=+1501.912817545" watchObservedRunningTime="2025-11-11 13:54:51.261501179 +0000 UTC m=+1501.921790788" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.268622 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" podStartSLOduration=7.627893103 podStartE2EDuration="22.268603294s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.007982064 +0000 UTC m=+1481.668271683" lastFinishedPulling="2025-11-11 13:54:45.648692255 +0000 UTC m=+1496.308981874" observedRunningTime="2025-11-11 13:54:51.266984952 +0000 UTC m=+1501.927274571" watchObservedRunningTime="2025-11-11 13:54:51.268603294 +0000 UTC m=+1501.928892913" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.288574 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" podStartSLOduration=7.567822636 podStartE2EDuration="22.288550764s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:30.944033395 +0000 UTC m=+1481.604323014" lastFinishedPulling="2025-11-11 13:54:45.664761523 +0000 UTC m=+1496.325051142" observedRunningTime="2025-11-11 13:54:51.288521303 +0000 UTC m=+1501.948810922" watchObservedRunningTime="2025-11-11 13:54:51.288550764 +0000 UTC m=+1501.948840383" Nov 11 13:54:51 crc kubenswrapper[4842]: I1111 13:54:51.344680 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" podStartSLOduration=7.658309923 podStartE2EDuration="22.344659875s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.006784616 +0000 UTC m=+1481.667074235" lastFinishedPulling="2025-11-11 13:54:45.693134568 +0000 UTC m=+1496.353424187" observedRunningTime="2025-11-11 13:54:51.340947537 +0000 UTC m=+1502.001237156" watchObservedRunningTime="2025-11-11 13:54:51.344659875 +0000 UTC m=+1502.004949504" Nov 11 13:54:52 crc kubenswrapper[4842]: I1111 13:54:52.117661 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.119826 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" event={"ID":"dcb386ac-da43-4629-a57f-1d272c31bd46","Type":"ContainerStarted","Data":"d701505098a4a0604ff2af889edf435e164220df86f2393ca5fa3a4320065788"} Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.122195 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" event={"ID":"f179c06b-83ea-4ece-b789-7bb5d75e05d5","Type":"ContainerStarted","Data":"f0ac6f29a4c9bd97863ec36a1dc84acfeda929cbedbcf7880b6e4d2ae07b7555"} Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.122362 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.124216 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" event={"ID":"3f8fa56e-98d6-4af9-9ea6-13917e0c5aee","Type":"ContainerStarted","Data":"c6d240051b7c0244ad77ef8413e876fcbc20a30aed0f8d49240b8c7ac1c12556"} Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.124370 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.125986 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" event={"ID":"cfcfc6be-d566-4ba4-87e9-6157d249adc0","Type":"ContainerStarted","Data":"8b87da4ad8f61d1d3815d5c544d50e7133703757c0a738153168d163a9e34c18"} Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.126147 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.128057 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" event={"ID":"85789962-b64f-422a-a2b4-4f98a786be81","Type":"ContainerStarted","Data":"99f9599cf97712f7dc02b3c24a56e3a6b5590fd6508b9041a1ccbd54b48fbfe5"} Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.128258 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.129367 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" event={"ID":"79eedf2f-0af7-46fa-aa0e-7d965ee918d3","Type":"ContainerStarted","Data":"195eced9226b90c7727cca03ac8570618d54226626bb42f3e376889c684088d6"} Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.129522 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.137311 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-phmkh" podStartSLOduration=3.424652079 podStartE2EDuration="26.137295797s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.421789778 +0000 UTC m=+1482.082079397" lastFinishedPulling="2025-11-11 13:54:54.134433496 +0000 UTC m=+1504.794723115" observedRunningTime="2025-11-11 13:54:55.132357291 +0000 UTC m=+1505.792646910" watchObservedRunningTime="2025-11-11 13:54:55.137295797 +0000 UTC m=+1505.797585416" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.147093 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" podStartSLOduration=3.47383847 podStartE2EDuration="26.147074035s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.514116602 +0000 UTC m=+1482.174406221" lastFinishedPulling="2025-11-11 13:54:54.187352167 +0000 UTC m=+1504.847641786" observedRunningTime="2025-11-11 13:54:55.145739433 +0000 UTC m=+1505.806029052" watchObservedRunningTime="2025-11-11 13:54:55.147074035 +0000 UTC m=+1505.807363654" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.169926 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" podStartSLOduration=3.393485334 podStartE2EDuration="26.169907576s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.414276831 +0000 UTC m=+1482.074566450" lastFinishedPulling="2025-11-11 13:54:54.190699083 +0000 UTC m=+1504.850988692" observedRunningTime="2025-11-11 13:54:55.163826974 +0000 UTC m=+1505.824116603" watchObservedRunningTime="2025-11-11 13:54:55.169907576 +0000 UTC m=+1505.830197195" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.192769 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" podStartSLOduration=3.429977065 podStartE2EDuration="26.192746387s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.372701698 +0000 UTC m=+1482.032991317" lastFinishedPulling="2025-11-11 13:54:54.13547102 +0000 UTC m=+1504.795760639" observedRunningTime="2025-11-11 13:54:55.189907677 +0000 UTC m=+1505.850197296" watchObservedRunningTime="2025-11-11 13:54:55.192746387 +0000 UTC m=+1505.853035996" Nov 11 13:54:55 crc kubenswrapper[4842]: I1111 13:54:55.209722 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" podStartSLOduration=3.41999791 podStartE2EDuration="26.209700692s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.421634273 +0000 UTC m=+1482.081923892" lastFinishedPulling="2025-11-11 13:54:54.211337055 +0000 UTC m=+1504.871626674" observedRunningTime="2025-11-11 13:54:55.204033543 +0000 UTC m=+1505.864323172" watchObservedRunningTime="2025-11-11 13:54:55.209700692 +0000 UTC m=+1505.869990311" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.376748 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-6999776966-pnbdh" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.395282 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" podStartSLOduration=7.623193844 podStartE2EDuration="30.395260409s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.414471717 +0000 UTC m=+1482.074761336" lastFinishedPulling="2025-11-11 13:54:54.186538282 +0000 UTC m=+1504.846827901" observedRunningTime="2025-11-11 13:54:55.233189684 +0000 UTC m=+1505.893479373" watchObservedRunningTime="2025-11-11 13:54:59.395260409 +0000 UTC m=+1510.055550028" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.448476 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/heat-operator-controller-manager-6b57d4f86f-pkdl8" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.543489 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-5c68d88c57-2k92j" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.551754 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-8444f8f688-gl575" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.604474 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-697bcb486c-xcdsm" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.746438 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-8588b44bb6-2m4gd" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.768864 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-97dc668d8-scbz4" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.849460 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-6559d764b4-ntbtw" Nov 11 13:54:59 crc kubenswrapper[4842]: I1111 13:54:59.960112 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-57cf4f487c-8hwbs" Nov 11 13:55:00 crc kubenswrapper[4842]: I1111 13:55:00.213525 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5cc784f744-5p2r8" Nov 11 13:55:00 crc kubenswrapper[4842]: I1111 13:55:00.243862 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-66ff8cb84f-nqlnk" Nov 11 13:55:00 crc kubenswrapper[4842]: I1111 13:55:00.349195 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6c495746fb-mgjxt" Nov 11 13:55:00 crc kubenswrapper[4842]: I1111 13:55:00.765168 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-54948dd897l2jn8" Nov 11 13:55:01 crc kubenswrapper[4842]: I1111 13:55:01.168080 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" event={"ID":"6c3ed8b6-85b5-402b-994a-ca068cc5a357","Type":"ContainerStarted","Data":"8a949c411f6f7469abd638fc53c3f12715a6e3cad43a17f949aa6473e21223ff"} Nov 11 13:55:01 crc kubenswrapper[4842]: I1111 13:55:01.168375 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:55:01 crc kubenswrapper[4842]: I1111 13:55:01.172718 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" event={"ID":"0b0bd151-ad85-46db-8425-fe640a956d01","Type":"ContainerStarted","Data":"4b39e5f81452acea3172c2aaae7eaae2df5e310e674ca5996f0442753e093f57"} Nov 11 13:55:01 crc kubenswrapper[4842]: I1111 13:55:01.172934 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" Nov 11 13:55:01 crc kubenswrapper[4842]: I1111 13:55:01.188174 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" podStartSLOduration=3.031948091 podStartE2EDuration="32.18815489s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.372384298 +0000 UTC m=+1482.032673917" lastFinishedPulling="2025-11-11 13:55:00.528591097 +0000 UTC m=+1511.188880716" observedRunningTime="2025-11-11 13:55:01.186441686 +0000 UTC m=+1511.846731305" watchObservedRunningTime="2025-11-11 13:55:01.18815489 +0000 UTC m=+1511.848444509" Nov 11 13:55:01 crc kubenswrapper[4842]: I1111 13:55:01.206160 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" podStartSLOduration=2.996304174 podStartE2EDuration="32.206138627s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:31.317927359 +0000 UTC m=+1481.978216978" lastFinishedPulling="2025-11-11 13:55:00.527761812 +0000 UTC m=+1511.188051431" observedRunningTime="2025-11-11 13:55:01.202662198 +0000 UTC m=+1511.862951817" watchObservedRunningTime="2025-11-11 13:55:01.206138627 +0000 UTC m=+1511.866428256" Nov 11 13:55:02 crc kubenswrapper[4842]: I1111 13:55:02.182776 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" event={"ID":"b7462081-0162-4bd6-96fe-23a8c29df0db","Type":"ContainerStarted","Data":"e7b08a9fc24cb836fa553512b291ce32cecedb2ba4d875cfda39590038f27382"} Nov 11 13:55:02 crc kubenswrapper[4842]: I1111 13:55:02.183281 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" Nov 11 13:55:02 crc kubenswrapper[4842]: I1111 13:55:02.198974 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" podStartSLOduration=1.631579412 podStartE2EDuration="33.198960311s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 13:54:30.050040422 +0000 UTC m=+1480.710330051" lastFinishedPulling="2025-11-11 13:55:01.617421331 +0000 UTC m=+1512.277710950" observedRunningTime="2025-11-11 13:55:02.194666555 +0000 UTC m=+1512.854956174" watchObservedRunningTime="2025-11-11 13:55:02.198960311 +0000 UTC m=+1512.859249930" Nov 11 13:55:04 crc kubenswrapper[4842]: I1111 13:55:04.199659 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" event={"ID":"fae3f9e9-7308-454c-80e2-c836cfa04a44","Type":"ContainerStarted","Data":"64bc0bcde7e7107bf27b53b4d4264bf711d9de0b62a3007d25d68269f75c5548"} Nov 11 13:55:04 crc kubenswrapper[4842]: I1111 13:55:04.200198 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" Nov 11 13:55:04 crc kubenswrapper[4842]: I1111 13:55:04.216750 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" podStartSLOduration=2.65941942 podStartE2EDuration="35.216724531s" podCreationTimestamp="2025-11-11 13:54:29 +0000 UTC" firstStartedPulling="2025-11-11 
13:54:30.947314719 +0000 UTC m=+1481.607604348" lastFinishedPulling="2025-11-11 13:55:03.50461984 +0000 UTC m=+1514.164909459" observedRunningTime="2025-11-11 13:55:04.214878512 +0000 UTC m=+1514.875168131" watchObservedRunningTime="2025-11-11 13:55:04.216724531 +0000 UTC m=+1514.877014150" Nov 11 13:55:09 crc kubenswrapper[4842]: I1111 13:55:09.351840 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-8dffd86b7-rldzd" Nov 11 13:55:09 crc kubenswrapper[4842]: I1111 13:55:09.575855 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-67c5b7495b-2sfch" Nov 11 13:55:09 crc kubenswrapper[4842]: I1111 13:55:09.926593 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-776bc4cb49-lh5x5" Nov 11 13:55:10 crc kubenswrapper[4842]: I1111 13:55:10.127851 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-64cbcd8bcf-b9q8b" Nov 11 13:55:16 crc kubenswrapper[4842]: I1111 13:55:16.481675 4842 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","poda7e10df1-f131-4fba-99e0-117a30f01ba0"] err="unable to destroy cgroup paths for cgroup [kubepods burstable poda7e10df1-f131-4fba-99e0-117a30f01ba0] : Timed out while waiting for systemd to remove kubepods-burstable-poda7e10df1_f131_4fba_99e0_117a30f01ba0.slice" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.548645 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59bcb7667c-mqzqx"] Nov 11 13:55:27 crc kubenswrapper[4842]: E1111 13:55:27.563699 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="registry-server" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.563736 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="registry-server" Nov 11 13:55:27 crc kubenswrapper[4842]: E1111 13:55:27.563750 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="extract-content" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.563756 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="extract-content" Nov 11 13:55:27 crc kubenswrapper[4842]: E1111 13:55:27.563783 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="extract-utilities" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.563791 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="extract-utilities" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.564148 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7e10df1-f131-4fba-99e0-117a30f01ba0" containerName="registry-server" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.572751 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.577616 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.577812 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-c22r6" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.578904 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59bcb7667c-mqzqx"] Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.605002 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.611736 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.621205 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b59bc7df-s4fqc"] Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.622547 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.626758 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.629401 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b59bc7df-s4fqc"] Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.632757 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-config\") pod \"dnsmasq-dns-59bcb7667c-mqzqx\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.632834 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swb65\" (UniqueName: \"kubernetes.io/projected/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-kube-api-access-swb65\") pod \"dnsmasq-dns-59bcb7667c-mqzqx\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.734475 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dnqh\" (UniqueName: \"kubernetes.io/projected/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-kube-api-access-8dnqh\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.734539 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-config\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.734613 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swb65\" (UniqueName: \"kubernetes.io/projected/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-kube-api-access-swb65\") pod \"dnsmasq-dns-59bcb7667c-mqzqx\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " 
pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.734684 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-dns-svc\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.734806 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-config\") pod \"dnsmasq-dns-59bcb7667c-mqzqx\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.735644 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-config\") pod \"dnsmasq-dns-59bcb7667c-mqzqx\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.762509 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swb65\" (UniqueName: \"kubernetes.io/projected/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-kube-api-access-swb65\") pod \"dnsmasq-dns-59bcb7667c-mqzqx\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.836090 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-dns-svc\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.836294 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dnqh\" (UniqueName: \"kubernetes.io/projected/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-kube-api-access-8dnqh\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.836328 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-config\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.837332 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-config\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.839027 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-dns-svc\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.856969 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8dnqh\" (UniqueName: \"kubernetes.io/projected/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-kube-api-access-8dnqh\") pod \"dnsmasq-dns-79b59bc7df-s4fqc\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.904473 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:27 crc kubenswrapper[4842]: I1111 13:55:27.942423 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:28 crc kubenswrapper[4842]: I1111 13:55:28.357798 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59bcb7667c-mqzqx"] Nov 11 13:55:28 crc kubenswrapper[4842]: I1111 13:55:28.433044 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b59bc7df-s4fqc"] Nov 11 13:55:29 crc kubenswrapper[4842]: I1111 13:55:29.362749 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" event={"ID":"aacd6e8e-2a77-48e4-8c9d-5de4409529cb","Type":"ContainerStarted","Data":"f7125eb5b0de7a058ab94f99bb0e218e25dbeb3ac65cd6e1421d5145cf635993"} Nov 11 13:55:29 crc kubenswrapper[4842]: I1111 13:55:29.363890 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" event={"ID":"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c","Type":"ContainerStarted","Data":"708412243f6a556e95bb4fb15ad009ed87ceee085e0d47e5ae6d6998fbff95a0"} Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.667440 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59bcb7667c-mqzqx"] Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.695861 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7457fc65-rzp27"] Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.702612 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.703590 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7457fc65-rzp27"] Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.799844 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz2ww\" (UniqueName: \"kubernetes.io/projected/6f9475c8-5c6d-4c00-8205-6d871e1aae11-kube-api-access-hz2ww\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.799893 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-config\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.799925 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-dns-svc\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.901937 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz2ww\" (UniqueName: \"kubernetes.io/projected/6f9475c8-5c6d-4c00-8205-6d871e1aae11-kube-api-access-hz2ww\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.902000 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-config\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.902067 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-dns-svc\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.903220 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-config\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.903583 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-dns-svc\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.930148 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz2ww\" (UniqueName: 
\"kubernetes.io/projected/6f9475c8-5c6d-4c00-8205-6d871e1aae11-kube-api-access-hz2ww\") pod \"dnsmasq-dns-6b7457fc65-rzp27\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:31 crc kubenswrapper[4842]: I1111 13:55:31.980832 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b59bc7df-s4fqc"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.001768 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76fc87ffc5-kls8n"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.003437 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.012617 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fc87ffc5-kls8n"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.034935 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.109742 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-config\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.109791 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftrd4\" (UniqueName: \"kubernetes.io/projected/54172e64-3a84-46c5-aa74-1d8465151695-kube-api-access-ftrd4\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.109969 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-dns-svc\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.216478 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-dns-svc\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.216574 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-config\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.216604 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftrd4\" (UniqueName: \"kubernetes.io/projected/54172e64-3a84-46c5-aa74-1d8465151695-kube-api-access-ftrd4\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.217772 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-config\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.217844 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-dns-svc\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.260907 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftrd4\" (UniqueName: \"kubernetes.io/projected/54172e64-3a84-46c5-aa74-1d8465151695-kube-api-access-ftrd4\") pod \"dnsmasq-dns-76fc87ffc5-kls8n\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.329533 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.341993 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7457fc65-rzp27"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.371167 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f78b5c955-jpbvq"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.372964 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.404800 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f78b5c955-jpbvq"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.419616 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-dns-svc\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.420255 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzddh\" (UniqueName: \"kubernetes.io/projected/5a312a1c-7421-4242-ba9d-6da4e35bac28-kube-api-access-rzddh\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.420310 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-config\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.521803 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzddh\" (UniqueName: \"kubernetes.io/projected/5a312a1c-7421-4242-ba9d-6da4e35bac28-kube-api-access-rzddh\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.521883 4842 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-config\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.521953 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-dns-svc\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.522800 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-dns-svc\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.523641 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-config\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.540308 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzddh\" (UniqueName: \"kubernetes.io/projected/5a312a1c-7421-4242-ba9d-6da4e35bac28-kube-api-access-rzddh\") pod \"dnsmasq-dns-5f78b5c955-jpbvq\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.703054 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7457fc65-rzp27"] Nov 11 13:55:32 crc kubenswrapper[4842]: W1111 13:55:32.720687 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f9475c8_5c6d_4c00_8205_6d871e1aae11.slice/crio-b5ba5603cced756ebba308f531042e704d85648839443fb0407a89e5a3873fbe WatchSource:0}: Error finding container b5ba5603cced756ebba308f531042e704d85648839443fb0407a89e5a3873fbe: Status 404 returned error can't find the container with id b5ba5603cced756ebba308f531042e704d85648839443fb0407a89e5a3873fbe Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.736766 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.843282 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.848221 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.856803 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.860435 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.860579 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.860784 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.863204 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.863286 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.863595 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-sttpk" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.864587 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935029 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-server-conf\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935072 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935118 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-config-data\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935135 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935156 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935177 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-pod-info\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935191 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44hbq\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-kube-api-access-44hbq\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935228 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935248 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935267 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:32 crc kubenswrapper[4842]: I1111 13:55:32.935293 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.015955 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fc87ffc5-kls8n"] Nov 11 13:55:33 crc kubenswrapper[4842]: W1111 13:55:33.031389 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54172e64_3a84_46c5_aa74_1d8465151695.slice/crio-8d3b62748e9065f4611766915c59652044d41a9c6a02b69967f60aa134017a51 WatchSource:0}: Error finding container 8d3b62748e9065f4611766915c59652044d41a9c6a02b69967f60aa134017a51: Status 404 returned error can't find the container with id 8d3b62748e9065f4611766915c59652044d41a9c6a02b69967f60aa134017a51 Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.037694 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.037813 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 
13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.037907 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-server-conf\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.037954 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.038002 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-config-data\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.038058 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.038377 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.038489 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-pod-info\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.038716 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44hbq\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-kube-api-access-44hbq\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.038805 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.038858 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.039384 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-plugins\") pod 
\"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.040452 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-server-conf\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.040748 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.042217 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.044250 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-config-data\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.051839 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.057258 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.057676 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-pod-info\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.057754 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.058167 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.061775 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44hbq\" (UniqueName: 
\"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-kube-api-access-44hbq\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.112967 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.173153 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.174629 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.177506 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.177731 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.177845 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.178335 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.178465 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.178603 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-mt5mv" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.178972 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.181798 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.194922 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248126 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248477 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248508 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248525 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13087f6b-10cb-421a-b695-84006a81506f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248556 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13087f6b-10cb-421a-b695-84006a81506f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248579 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248606 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248628 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248760 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"server-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248821 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.248846 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whvlf\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-kube-api-access-whvlf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.300625 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f78b5c955-jpbvq"] Nov 11 13:55:33 crc kubenswrapper[4842]: W1111 13:55:33.341383 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a312a1c_7421_4242_ba9d_6da4e35bac28.slice/crio-7294ac7cb1f7d4c104733b37535f6b6377e8f1aef227a68f9a3b50966817234a WatchSource:0}: Error finding container 7294ac7cb1f7d4c104733b37535f6b6377e8f1aef227a68f9a3b50966817234a: Status 404 returned error can't find the container with id 7294ac7cb1f7d4c104733b37535f6b6377e8f1aef227a68f9a3b50966817234a Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354037 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354119 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354143 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whvlf\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-kube-api-access-whvlf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354194 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354222 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354265 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354288 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13087f6b-10cb-421a-b695-84006a81506f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354332 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13087f6b-10cb-421a-b695-84006a81506f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354368 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354404 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.354488 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.355217 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.355594 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.356539 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc 
kubenswrapper[4842]: I1111 13:55:33.356578 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.357826 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.360242 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13087f6b-10cb-421a-b695-84006a81506f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.362541 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.362751 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.363231 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.364162 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13087f6b-10cb-421a-b695-84006a81506f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.378600 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whvlf\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-kube-api-access-whvlf\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.386087 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.501158 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" 
event={"ID":"54172e64-3a84-46c5-aa74-1d8465151695","Type":"ContainerStarted","Data":"8d3b62748e9065f4611766915c59652044d41a9c6a02b69967f60aa134017a51"} Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.506523 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.507138 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" event={"ID":"6f9475c8-5c6d-4c00-8205-6d871e1aae11","Type":"ContainerStarted","Data":"b5ba5603cced756ebba308f531042e704d85648839443fb0407a89e5a3873fbe"} Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.509687 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.510990 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.519742 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.519810 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-cl4b5" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.519837 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.519868 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.519753 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.524162 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" event={"ID":"5a312a1c-7421-4242-ba9d-6da4e35bac28","Type":"ContainerStarted","Data":"7294ac7cb1f7d4c104733b37535f6b6377e8f1aef227a68f9a3b50966817234a"} Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.525428 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.532242 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.539289 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558459 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558523 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " 
pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558550 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558574 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558599 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558632 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558669 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558696 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558721 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558735 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.558751 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgn95\" (UniqueName: 
\"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-kube-api-access-sgn95\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.660022 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.661432 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.661925 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.661370 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.661983 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662016 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662070 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662088 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgn95\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-kube-api-access-sgn95\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662118 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662153 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662177 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662225 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662327 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662901 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.662913 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.663047 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.663296 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.664258 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.666348 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.672275 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.679612 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.683797 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgn95\" (UniqueName: \"kubernetes.io/projected/121e4ffa-c7c1-40ef-a668-500b2cc8fba6-kube-api-access-sgn95\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.695894 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"121e4ffa-c7c1-40ef-a668-500b2cc8fba6\") " pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.814780 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 13:55:33 crc kubenswrapper[4842]: I1111 13:55:33.841356 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:55:33 crc kubenswrapper[4842]: W1111 13:55:33.843583 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd8c2bc7_52e3_408c_8a72_3e5978b30a42.slice/crio-49f92f08acd24b3ecf1c46a7098bdd6e26fbf4183b808bdca3654c58e7147528 WatchSource:0}: Error finding container 49f92f08acd24b3ecf1c46a7098bdd6e26fbf4183b808bdca3654c58e7147528: Status 404 returned error can't find the container with id 49f92f08acd24b3ecf1c46a7098bdd6e26fbf4183b808bdca3654c58e7147528 Nov 11 13:55:34 crc kubenswrapper[4842]: I1111 13:55:34.100642 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 13:55:34 crc kubenswrapper[4842]: W1111 13:55:34.129625 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13087f6b_10cb_421a_b695_84006a81506f.slice/crio-15da46f82d0c034a572c091a9b5cfd380013c902c2c6e847e2cd33f797be9c94 WatchSource:0}: Error finding container 15da46f82d0c034a572c091a9b5cfd380013c902c2c6e847e2cd33f797be9c94: Status 404 returned error can't find the container with id 15da46f82d0c034a572c091a9b5cfd380013c902c2c6e847e2cd33f797be9c94 Nov 11 13:55:34 crc kubenswrapper[4842]: I1111 13:55:34.309691 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 11 13:55:34 crc kubenswrapper[4842]: I1111 13:55:34.552371 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"121e4ffa-c7c1-40ef-a668-500b2cc8fba6","Type":"ContainerStarted","Data":"f9df3793bc527a016f091f4a4e1b60125084950c46efeec25649705f4e99dee5"} Nov 11 13:55:34 crc kubenswrapper[4842]: I1111 13:55:34.554717 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13087f6b-10cb-421a-b695-84006a81506f","Type":"ContainerStarted","Data":"15da46f82d0c034a572c091a9b5cfd380013c902c2c6e847e2cd33f797be9c94"} Nov 11 13:55:34 crc kubenswrapper[4842]: I1111 13:55:34.559306 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"dd8c2bc7-52e3-408c-8a72-3e5978b30a42","Type":"ContainerStarted","Data":"49f92f08acd24b3ecf1c46a7098bdd6e26fbf4183b808bdca3654c58e7147528"} Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.415567 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.417422 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.420890 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.420995 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.427049 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.427411 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-tjqbr" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.431642 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.431737 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.435372 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.593916 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-secrets\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.594017 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.594051 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-kolla-config\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.594081 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.594135 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hstzs\" (UniqueName: \"kubernetes.io/projected/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-kube-api-access-hstzs\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.594169 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc 
kubenswrapper[4842]: I1111 13:55:35.594223 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-config-data-default\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.594257 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.594281 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.707939 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708003 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708044 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-secrets\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708123 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708146 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-kolla-config\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708177 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708212 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hstzs\" (UniqueName: 
\"kubernetes.io/projected/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-kube-api-access-hstzs\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708243 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.708292 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-config-data-default\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.709491 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-config-data-default\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.709748 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.718424 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-kolla-config\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.720058 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.720332 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.725582 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-secrets\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.726749 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: 
I1111 13:55:35.740334 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.750935 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hstzs\" (UniqueName: \"kubernetes.io/projected/c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca-kube-api-access-hstzs\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:35 crc kubenswrapper[4842]: I1111 13:55:35.880198 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-0\" (UID: \"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca\") " pod="openstack/openstack-galera-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.056006 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.878916 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.880609 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.884449 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.886063 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.886552 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.886705 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-nhngq" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.886748 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.890623 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.890768 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-q5x9r" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.891598 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.891885 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.897693 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.923578 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.942953 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd2b40ae-3270-4f5b-9700-026adaf919ca-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.943005 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fd2b40ae-3270-4f5b-9700-026adaf919ca-config-data\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.943136 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2b40ae-3270-4f5b-9700-026adaf919ca-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.943158 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd2b40ae-3270-4f5b-9700-026adaf919ca-kolla-config\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:36 crc kubenswrapper[4842]: I1111 13:55:36.943193 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhl9h\" (UniqueName: \"kubernetes.io/projected/fd2b40ae-3270-4f5b-9700-026adaf919ca-kube-api-access-hhl9h\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045169 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6225291-4a01-43af-ba67-f5281c2bd436-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045241 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045271 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045314 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2b40ae-3270-4f5b-9700-026adaf919ca-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045346 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd2b40ae-3270-4f5b-9700-026adaf919ca-kolla-config\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045377 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxpmg\" (UniqueName: \"kubernetes.io/projected/a6225291-4a01-43af-ba67-f5281c2bd436-kube-api-access-xxpmg\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045401 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045435 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045457 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045480 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045505 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhl9h\" (UniqueName: \"kubernetes.io/projected/fd2b40ae-3270-4f5b-9700-026adaf919ca-kube-api-access-hhl9h\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045539 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd2b40ae-3270-4f5b-9700-026adaf919ca-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045583 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fd2b40ae-3270-4f5b-9700-026adaf919ca-config-data\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.045636 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.046771 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd2b40ae-3270-4f5b-9700-026adaf919ca-kolla-config\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.047136 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fd2b40ae-3270-4f5b-9700-026adaf919ca-config-data\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.052385 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd2b40ae-3270-4f5b-9700-026adaf919ca-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.052885 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd2b40ae-3270-4f5b-9700-026adaf919ca-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.087204 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhl9h\" (UniqueName: \"kubernetes.io/projected/fd2b40ae-3270-4f5b-9700-026adaf919ca-kube-api-access-hhl9h\") pod \"memcached-0\" (UID: \"fd2b40ae-3270-4f5b-9700-026adaf919ca\") " pod="openstack/memcached-0" Nov 11 13:55:37 crc 
kubenswrapper[4842]: I1111 13:55:37.148148 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.148412 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6225291-4a01-43af-ba67-f5281c2bd436-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.148486 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.148536 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.148611 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.148891 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a6225291-4a01-43af-ba67-f5281c2bd436-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.148983 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxpmg\" (UniqueName: \"kubernetes.io/projected/a6225291-4a01-43af-ba67-f5281c2bd436-kube-api-access-xxpmg\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.149001 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.149030 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.149051 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.149077 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.149363 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.150198 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.150295 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a6225291-4a01-43af-ba67-f5281c2bd436-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.155699 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.156568 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.174074 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6225291-4a01-43af-ba67-f5281c2bd436-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.174397 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxpmg\" (UniqueName: \"kubernetes.io/projected/a6225291-4a01-43af-ba67-f5281c2bd436-kube-api-access-xxpmg\") pod \"openstack-cell1-galera-0\" (UID: \"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.195919 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: 
\"a6225291-4a01-43af-ba67-f5281c2bd436\") " pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.215669 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 11 13:55:37 crc kubenswrapper[4842]: I1111 13:55:37.247498 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.150028 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.151751 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.164075 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-hhd5k" Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.170374 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.296969 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2v2z\" (UniqueName: \"kubernetes.io/projected/dc76c919-6a74-4be1-8142-3200604d22aa-kube-api-access-c2v2z\") pod \"kube-state-metrics-0\" (UID: \"dc76c919-6a74-4be1-8142-3200604d22aa\") " pod="openstack/kube-state-metrics-0" Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.401512 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2v2z\" (UniqueName: \"kubernetes.io/projected/dc76c919-6a74-4be1-8142-3200604d22aa-kube-api-access-c2v2z\") pod \"kube-state-metrics-0\" (UID: \"dc76c919-6a74-4be1-8142-3200604d22aa\") " pod="openstack/kube-state-metrics-0" Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.422535 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2v2z\" (UniqueName: \"kubernetes.io/projected/dc76c919-6a74-4be1-8142-3200604d22aa-kube-api-access-c2v2z\") pod \"kube-state-metrics-0\" (UID: \"dc76c919-6a74-4be1-8142-3200604d22aa\") " pod="openstack/kube-state-metrics-0" Nov 11 13:55:39 crc kubenswrapper[4842]: I1111 13:55:39.520795 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.438112 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.442145 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.444841 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-tcncx" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.444849 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.447237 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.447452 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.447563 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.448588 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.456269 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.637768 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.637824 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/218296f7-79b4-47ed-93e7-e0cac5ee935d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.637884 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.637907 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.637950 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-config\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.637979 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/218296f7-79b4-47ed-93e7-e0cac5ee935d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.638205 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px57j\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-kube-api-access-px57j\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.638286 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739530 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px57j\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-kube-api-access-px57j\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739594 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739650 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739677 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/218296f7-79b4-47ed-93e7-e0cac5ee935d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739741 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739767 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " 
pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739804 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-config\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.739829 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/218296f7-79b4-47ed-93e7-e0cac5ee935d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.741039 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/218296f7-79b4-47ed-93e7-e0cac5ee935d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.748703 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/218296f7-79b4-47ed-93e7-e0cac5ee935d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.749053 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.749598 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.757354 4842 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.757393 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f6d1d4d77a4f1fd1afa0791ead5af16f820dac5d1fa2885ec7edd11054a9ebc3/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.757547 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px57j\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-kube-api-access-px57j\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.762896 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-config\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.776918 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:40 crc kubenswrapper[4842]: I1111 13:55:40.807901 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:41 crc kubenswrapper[4842]: I1111 13:55:41.071279 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.763658 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.765523 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.768315 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.768511 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.768655 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.768817 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.768963 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-9dqf5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.781300 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.873554 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-f7sn5"] Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.875166 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.877215 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.877424 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-wp5qz" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.877626 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881027 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881080 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f712abc-0d24-4fc2-a103-c102a8833466-config\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881121 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881153 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881175 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r74g\" (UniqueName: \"kubernetes.io/projected/9f712abc-0d24-4fc2-a103-c102a8833466-kube-api-access-9r74g\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881205 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881231 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9f712abc-0d24-4fc2-a103-c102a8833466-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.881254 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f712abc-0d24-4fc2-a103-c102a8833466-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.886220 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f7sn5"] Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.936551 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-cc6nz"] Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.938457 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.955855 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-cc6nz"] Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.982870 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-run-ovn\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.982954 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.982984 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f712abc-0d24-4fc2-a103-c102a8833466-config\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983057 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2qpp\" (UniqueName: \"kubernetes.io/projected/6ae937ce-ab8b-471f-b809-821ca6f23ecd-kube-api-access-b2qpp\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983115 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983153 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ae937ce-ab8b-471f-b809-821ca6f23ecd-combined-ca-bundle\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983320 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983373 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-run\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983416 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r74g\" (UniqueName: \"kubernetes.io/projected/9f712abc-0d24-4fc2-a103-c102a8833466-kube-api-access-9r74g\") pod \"ovsdbserver-nb-0\" (UID: 
\"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983462 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983473 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983806 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9f712abc-0d24-4fc2-a103-c102a8833466-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983910 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f712abc-0d24-4fc2-a103-c102a8833466-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983965 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ae937ce-ab8b-471f-b809-821ca6f23ecd-ovn-controller-tls-certs\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.983986 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-log-ovn\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.984028 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ae937ce-ab8b-471f-b809-821ca6f23ecd-scripts\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.984173 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9f712abc-0d24-4fc2-a103-c102a8833466-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.984457 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f712abc-0d24-4fc2-a103-c102a8833466-config\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.984994 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/configmap/9f712abc-0d24-4fc2-a103-c102a8833466-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.993013 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.993036 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:42 crc kubenswrapper[4842]: I1111 13:55:42.993676 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f712abc-0d24-4fc2-a103-c102a8833466-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.017651 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.017868 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r74g\" (UniqueName: \"kubernetes.io/projected/9f712abc-0d24-4fc2-a103-c102a8833466-kube-api-access-9r74g\") pod \"ovsdbserver-nb-0\" (UID: \"9f712abc-0d24-4fc2-a103-c102a8833466\") " pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.085498 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-etc-ovs\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.085600 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-lib\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.085688 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2qpp\" (UniqueName: \"kubernetes.io/projected/6ae937ce-ab8b-471f-b809-821ca6f23ecd-kube-api-access-b2qpp\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.085750 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ae937ce-ab8b-471f-b809-821ca6f23ecd-combined-ca-bundle\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " 
pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.085785 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-run\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.086295 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-run\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.086918 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ae937ce-ab8b-471f-b809-821ca6f23ecd-ovn-controller-tls-certs\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.088416 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-log-ovn\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.088460 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-run\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.088499 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ae937ce-ab8b-471f-b809-821ca6f23ecd-scripts\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.088548 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpx4j\" (UniqueName: \"kubernetes.io/projected/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-kube-api-access-kpx4j\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.088582 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-log\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.088602 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-scripts\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.088632 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-run-ovn\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.089377 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-log-ovn\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.089844 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ae937ce-ab8b-471f-b809-821ca6f23ecd-combined-ca-bundle\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.089944 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ae937ce-ab8b-471f-b809-821ca6f23ecd-var-run-ovn\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.091526 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ae937ce-ab8b-471f-b809-821ca6f23ecd-scripts\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.101222 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.101499 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2qpp\" (UniqueName: \"kubernetes.io/projected/6ae937ce-ab8b-471f-b809-821ca6f23ecd-kube-api-access-b2qpp\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.105956 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ae937ce-ab8b-471f-b809-821ca6f23ecd-ovn-controller-tls-certs\") pod \"ovn-controller-f7sn5\" (UID: \"6ae937ce-ab8b-471f-b809-821ca6f23ecd\") " pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.189772 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-lib\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.190978 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-run\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.191126 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpx4j\" (UniqueName: \"kubernetes.io/projected/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-kube-api-access-kpx4j\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.191179 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-log\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.191197 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-scripts\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.191304 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-etc-ovs\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.191614 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-etc-ovs\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.190483 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-lib\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-lib\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.191714 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-log\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.191782 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-var-run\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.196936 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-scripts\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.197256 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f7sn5" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.207370 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpx4j\" (UniqueName: \"kubernetes.io/projected/e04e103c-eb86-4e27-b5ac-0d4faf32d1f5-kube-api-access-kpx4j\") pod \"ovn-controller-ovs-cc6nz\" (UID: \"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5\") " pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:43 crc kubenswrapper[4842]: I1111 13:55:43.252426 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.256775 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.258677 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.263295 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.263660 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-gzkqp" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.263779 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.269021 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.272080 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.452350 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.452408 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-config\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.452474 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.452523 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.452547 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.452573 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.452663 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrmr6\" (UniqueName: \"kubernetes.io/projected/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-kube-api-access-xrmr6\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " 
pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.453084 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554411 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrmr6\" (UniqueName: \"kubernetes.io/projected/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-kube-api-access-xrmr6\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554469 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554500 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554517 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-config\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554560 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554600 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554622 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.554644 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.555258 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.556250 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-config\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.556255 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.556431 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.560116 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.560666 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.562386 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.578833 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrmr6\" (UniqueName: \"kubernetes.io/projected/435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85-kube-api-access-xrmr6\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.587358 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85\") " pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:46 crc kubenswrapper[4842]: I1111 13:55:46.887014 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.203310 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.203820 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.203952 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-44hbq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(dd8c2bc7-52e3-408c-8a72-3e5978b30a42): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 
13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.205239 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.219184 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.219236 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.219961 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-whvlf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(13087f6b-10cb-421a-b695-84006a81506f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.220172 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.220236 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.220381 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sgn95,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-notifications-server-0_openstack(121e4ffa-c7c1-40ef-a668-500b2cc8fba6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.221699 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-notifications-server-0" podUID="121e4ffa-c7c1-40ef-a668-500b2cc8fba6" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.222359 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="13087f6b-10cb-421a-b695-84006a81506f" Nov 11 13:55:54 crc kubenswrapper[4842]: I1111 13:55:54.561740 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.806757 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-notifications-server-0" podUID="121e4ffa-c7c1-40ef-a668-500b2cc8fba6" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.807110 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image 
\\\"38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-server-0" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" Nov 11 13:55:54 crc kubenswrapper[4842]: E1111 13:55:54.807511 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/podified-master-centos10/openstack-rabbitmq:watcher_latest\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="13087f6b-10cb-421a-b695-84006a81506f" Nov 11 13:55:57 crc kubenswrapper[4842]: I1111 13:55:57.825778 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc76c919-6a74-4be1-8142-3200604d22aa","Type":"ContainerStarted","Data":"ba4d6af5b001dec4bef4b626256d86cfb01d8c5842a79138dc1be76b5040c24f"} Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.444752 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.444814 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.444962 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c7h56dh5cfh8bh54fhbbhf4h5b9hdch67fhd7h55fh55fh6ch9h548h54ch665h647h6h8fhd6h5dfh5cdh58bh577h66fh695h5fbh55h77h5fcq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rzddh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5f78b5c955-jpbvq_openstack(5a312a1c-7421-4242-ba9d-6da4e35bac28): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.446398 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" podUID="5a312a1c-7421-4242-ba9d-6da4e35bac28" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.453953 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.454029 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.454171 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-swb65,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-59bcb7667c-mqzqx_openstack(aacd6e8e-2a77-48e4-8c9d-5de4409529cb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.455271 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" podUID="aacd6e8e-2a77-48e4-8c9d-5de4409529cb" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.480516 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.480574 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.480700 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hz2ww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6b7457fc65-rzp27_openstack(6f9475c8-5c6d-4c00-8205-6d871e1aae11): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.482294 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" podUID="6f9475c8-5c6d-4c00-8205-6d871e1aae11" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.531753 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.531810 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.531914 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ftrd4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-76fc87ffc5-kls8n_openstack(54172e64-3a84-46c5-aa74-1d8465151695): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.533119 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" podUID="54172e64-3a84-46c5-aa74-1d8465151695" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.632507 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.632566 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.632681 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8dnqh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-79b59bc7df-s4fqc_openstack(a7089cbb-9de8-45a3-8c25-3033d8a8cd9c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.633897 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" podUID="a7089cbb-9de8-45a3-8c25-3033d8a8cd9c" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.835664 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest\\\"\"" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" podUID="54172e64-3a84-46c5-aa74-1d8465151695" Nov 11 13:55:58 crc kubenswrapper[4842]: E1111 13:55:58.839354 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/podified-master-centos10/openstack-neutron-server:watcher_latest\\\"\"" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" podUID="5a312a1c-7421-4242-ba9d-6da4e35bac28" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.294473 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.314715 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-dns-svc\") pod \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.314811 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz2ww\" (UniqueName: \"kubernetes.io/projected/6f9475c8-5c6d-4c00-8205-6d871e1aae11-kube-api-access-hz2ww\") pod \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.314911 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-config\") pod \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\" (UID: \"6f9475c8-5c6d-4c00-8205-6d871e1aae11\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.315513 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6f9475c8-5c6d-4c00-8205-6d871e1aae11" (UID: "6f9475c8-5c6d-4c00-8205-6d871e1aae11"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.315898 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-config" (OuterVolumeSpecName: "config") pod "6f9475c8-5c6d-4c00-8205-6d871e1aae11" (UID: "6f9475c8-5c6d-4c00-8205-6d871e1aae11"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.318884 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.322172 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f9475c8-5c6d-4c00-8205-6d871e1aae11-kube-api-access-hz2ww" (OuterVolumeSpecName: "kube-api-access-hz2ww") pod "6f9475c8-5c6d-4c00-8205-6d871e1aae11" (UID: "6f9475c8-5c6d-4c00-8205-6d871e1aae11"). InnerVolumeSpecName "kube-api-access-hz2ww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.358568 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.403898 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.415704 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.415993 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swb65\" (UniqueName: \"kubernetes.io/projected/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-kube-api-access-swb65\") pod \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.416035 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dnqh\" (UniqueName: \"kubernetes.io/projected/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-kube-api-access-8dnqh\") pod \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.416058 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-config\") pod \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.416321 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.416336 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz2ww\" (UniqueName: \"kubernetes.io/projected/6f9475c8-5c6d-4c00-8205-6d871e1aae11-kube-api-access-hz2ww\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.416346 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f9475c8-5c6d-4c00-8205-6d871e1aae11-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.416654 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-config" (OuterVolumeSpecName: "config") pod "a7089cbb-9de8-45a3-8c25-3033d8a8cd9c" (UID: "a7089cbb-9de8-45a3-8c25-3033d8a8cd9c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.422301 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-kube-api-access-8dnqh" (OuterVolumeSpecName: "kube-api-access-8dnqh") pod "a7089cbb-9de8-45a3-8c25-3033d8a8cd9c" (UID: "a7089cbb-9de8-45a3-8c25-3033d8a8cd9c"). InnerVolumeSpecName "kube-api-access-8dnqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.442323 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-kube-api-access-swb65" (OuterVolumeSpecName: "kube-api-access-swb65") pod "aacd6e8e-2a77-48e4-8c9d-5de4409529cb" (UID: "aacd6e8e-2a77-48e4-8c9d-5de4409529cb"). InnerVolumeSpecName "kube-api-access-swb65". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.466244 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.477747 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f7sn5"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.482353 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.517272 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-dns-svc\") pod \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\" (UID: \"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.517402 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-config\") pod \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\" (UID: \"aacd6e8e-2a77-48e4-8c9d-5de4409529cb\") " Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.518565 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a7089cbb-9de8-45a3-8c25-3033d8a8cd9c" (UID: "a7089cbb-9de8-45a3-8c25-3033d8a8cd9c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.518873 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-config" (OuterVolumeSpecName: "config") pod "aacd6e8e-2a77-48e4-8c9d-5de4409529cb" (UID: "aacd6e8e-2a77-48e4-8c9d-5de4409529cb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.519085 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.519125 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.519137 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swb65\" (UniqueName: \"kubernetes.io/projected/aacd6e8e-2a77-48e4-8c9d-5de4409529cb-kube-api-access-swb65\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.519150 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dnqh\" (UniqueName: \"kubernetes.io/projected/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-kube-api-access-8dnqh\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.519162 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.544905 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 11 13:55:59 crc kubenswrapper[4842]: W1111 13:55:59.560374 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd2b40ae_3270_4f5b_9700_026adaf919ca.slice/crio-3c2138e838fc27603a28ef772079b26d4edead2f8914a73eca3ce761c78382b7 WatchSource:0}: Error finding container 3c2138e838fc27603a28ef772079b26d4edead2f8914a73eca3ce761c78382b7: Status 404 returned error can't find the container with id 3c2138e838fc27603a28ef772079b26d4edead2f8914a73eca3ce761c78382b7 Nov 11 13:55:59 crc kubenswrapper[4842]: W1111 13:55:59.561271 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6cfb8ab_08d0_48f5_8ffc_2a2d33107eca.slice/crio-ee13ade02285d34a5928b287e84d6dde607223c491ccce09e926ce8d0f9f5638 WatchSource:0}: Error finding container ee13ade02285d34a5928b287e84d6dde607223c491ccce09e926ce8d0f9f5638: Status 404 returned error can't find the container with id ee13ade02285d34a5928b287e84d6dde607223c491ccce09e926ce8d0f9f5638 Nov 11 13:55:59 crc kubenswrapper[4842]: W1111 13:55:59.622011 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ae937ce_ab8b_471f_b809_821ca6f23ecd.slice/crio-154c7f264453696594a2ec48facc96c12d9c490dc457ba6f061c5dd1bd366e4f WatchSource:0}: Error finding container 154c7f264453696594a2ec48facc96c12d9c490dc457ba6f061c5dd1bd366e4f: Status 404 returned error can't find the container with id 154c7f264453696594a2ec48facc96c12d9c490dc457ba6f061c5dd1bd366e4f Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.648260 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-cc6nz"] Nov 11 13:55:59 crc kubenswrapper[4842]: W1111 13:55:59.698333 4842 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode04e103c_eb86_4e27_b5ac_0d4faf32d1f5.slice/crio-e30db38bfb4437b4b5bb8e79206288e410f15ff8b90edb8d719cf5061123d006 WatchSource:0}: Error finding container e30db38bfb4437b4b5bb8e79206288e410f15ff8b90edb8d719cf5061123d006: Status 404 returned error can't find the container with id e30db38bfb4437b4b5bb8e79206288e410f15ff8b90edb8d719cf5061123d006 Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.841291 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" event={"ID":"6f9475c8-5c6d-4c00-8205-6d871e1aae11","Type":"ContainerDied","Data":"b5ba5603cced756ebba308f531042e704d85648839443fb0407a89e5a3873fbe"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.841384 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7457fc65-rzp27" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.847590 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.847654 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b59bc7df-s4fqc" event={"ID":"a7089cbb-9de8-45a3-8c25-3033d8a8cd9c","Type":"ContainerDied","Data":"708412243f6a556e95bb4fb15ad009ed87ceee085e0d47e5ae6d6998fbff95a0"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.850429 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cc6nz" event={"ID":"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5","Type":"ContainerStarted","Data":"e30db38bfb4437b4b5bb8e79206288e410f15ff8b90edb8d719cf5061123d006"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.853074 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerStarted","Data":"1bcf0b49382156844d211c77ec8a2384e5ea7c87b32390859da7686c2324afa0"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.857214 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fd2b40ae-3270-4f5b-9700-026adaf919ca","Type":"ContainerStarted","Data":"3c2138e838fc27603a28ef772079b26d4edead2f8914a73eca3ce761c78382b7"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.859316 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85","Type":"ContainerStarted","Data":"1d5f247463036634205f507b79281dd366b88e3a12b94d05a0aebb16c4626adb"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.861337 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a6225291-4a01-43af-ba67-f5281c2bd436","Type":"ContainerStarted","Data":"8b6b97ebf96dd572475b66f1b7fffa59b5ee519b429bcb8e45114e4810a51647"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.862805 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca","Type":"ContainerStarted","Data":"ee13ade02285d34a5928b287e84d6dde607223c491ccce09e926ce8d0f9f5638"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.864172 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f7sn5" 
event={"ID":"6ae937ce-ab8b-471f-b809-821ca6f23ecd","Type":"ContainerStarted","Data":"154c7f264453696594a2ec48facc96c12d9c490dc457ba6f061c5dd1bd366e4f"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.866052 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" event={"ID":"aacd6e8e-2a77-48e4-8c9d-5de4409529cb","Type":"ContainerDied","Data":"f7125eb5b0de7a058ab94f99bb0e218e25dbeb3ac65cd6e1421d5145cf635993"} Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.866087 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59bcb7667c-mqzqx" Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.942691 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7457fc65-rzp27"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.948234 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7457fc65-rzp27"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.960365 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b59bc7df-s4fqc"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.966769 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b59bc7df-s4fqc"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.980021 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59bcb7667c-mqzqx"] Nov 11 13:55:59 crc kubenswrapper[4842]: I1111 13:55:59.986019 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59bcb7667c-mqzqx"] Nov 11 13:56:00 crc kubenswrapper[4842]: I1111 13:56:00.070317 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f9475c8-5c6d-4c00-8205-6d871e1aae11" path="/var/lib/kubelet/pods/6f9475c8-5c6d-4c00-8205-6d871e1aae11/volumes" Nov 11 13:56:00 crc kubenswrapper[4842]: I1111 13:56:00.070755 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7089cbb-9de8-45a3-8c25-3033d8a8cd9c" path="/var/lib/kubelet/pods/a7089cbb-9de8-45a3-8c25-3033d8a8cd9c/volumes" Nov 11 13:56:00 crc kubenswrapper[4842]: I1111 13:56:00.071536 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aacd6e8e-2a77-48e4-8c9d-5de4409529cb" path="/var/lib/kubelet/pods/aacd6e8e-2a77-48e4-8c9d-5de4409529cb/volumes" Nov 11 13:56:00 crc kubenswrapper[4842]: I1111 13:56:00.319272 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 11 13:56:00 crc kubenswrapper[4842]: W1111 13:56:00.530985 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f712abc_0d24_4fc2_a103_c102a8833466.slice/crio-fb0074a40b5c35327ddc84365f8cda181e1995fbeae3569a12a627fe3dfa4fb5 WatchSource:0}: Error finding container fb0074a40b5c35327ddc84365f8cda181e1995fbeae3569a12a627fe3dfa4fb5: Status 404 returned error can't find the container with id fb0074a40b5c35327ddc84365f8cda181e1995fbeae3569a12a627fe3dfa4fb5 Nov 11 13:56:00 crc kubenswrapper[4842]: I1111 13:56:00.873496 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9f712abc-0d24-4fc2-a103-c102a8833466","Type":"ContainerStarted","Data":"fb0074a40b5c35327ddc84365f8cda181e1995fbeae3569a12a627fe3dfa4fb5"} Nov 11 13:56:04 crc kubenswrapper[4842]: I1111 13:56:04.899334 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" 
event={"ID":"fd2b40ae-3270-4f5b-9700-026adaf919ca","Type":"ContainerStarted","Data":"4c39102e6112e31e18f3055f050ea3dd218538a65709a9104cc313656dd26cb5"} Nov 11 13:56:04 crc kubenswrapper[4842]: I1111 13:56:04.899882 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 11 13:56:04 crc kubenswrapper[4842]: I1111 13:56:04.901932 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca","Type":"ContainerStarted","Data":"4290e935e0991967383153c4789ed17c1be66f5da1b92af1922380d647933c3c"} Nov 11 13:56:04 crc kubenswrapper[4842]: I1111 13:56:04.903840 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc76c919-6a74-4be1-8142-3200604d22aa","Type":"ContainerStarted","Data":"a50bb1991daefd6c457e78d2e251872e382df15bcdd439d2bc80b69721300ca8"} Nov 11 13:56:04 crc kubenswrapper[4842]: I1111 13:56:04.903960 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 11 13:56:04 crc kubenswrapper[4842]: I1111 13:56:04.920163 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=24.254951775 podStartE2EDuration="28.920144835s" podCreationTimestamp="2025-11-11 13:55:36 +0000 UTC" firstStartedPulling="2025-11-11 13:55:59.562781477 +0000 UTC m=+1570.223071096" lastFinishedPulling="2025-11-11 13:56:04.227974537 +0000 UTC m=+1574.888264156" observedRunningTime="2025-11-11 13:56:04.916248761 +0000 UTC m=+1575.576538390" watchObservedRunningTime="2025-11-11 13:56:04.920144835 +0000 UTC m=+1575.580434454" Nov 11 13:56:04 crc kubenswrapper[4842]: I1111 13:56:04.956207 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=19.521717081 podStartE2EDuration="25.956183389s" podCreationTimestamp="2025-11-11 13:55:39 +0000 UTC" firstStartedPulling="2025-11-11 13:55:57.793509619 +0000 UTC m=+1568.453799238" lastFinishedPulling="2025-11-11 13:56:04.227975927 +0000 UTC m=+1574.888265546" observedRunningTime="2025-11-11 13:56:04.952501872 +0000 UTC m=+1575.612791491" watchObservedRunningTime="2025-11-11 13:56:04.956183389 +0000 UTC m=+1575.616473018" Nov 11 13:56:05 crc kubenswrapper[4842]: I1111 13:56:05.916223 4842 generic.go:334] "Generic (PLEG): container finished" podID="e04e103c-eb86-4e27-b5ac-0d4faf32d1f5" containerID="98577b7be1c8e7de633fe805136e645a33a858144d156af7ee66f52737254eaf" exitCode=0 Nov 11 13:56:05 crc kubenswrapper[4842]: I1111 13:56:05.916496 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cc6nz" event={"ID":"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5","Type":"ContainerDied","Data":"98577b7be1c8e7de633fe805136e645a33a858144d156af7ee66f52737254eaf"} Nov 11 13:56:05 crc kubenswrapper[4842]: I1111 13:56:05.919644 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9f712abc-0d24-4fc2-a103-c102a8833466","Type":"ContainerStarted","Data":"8daa0785f4d30b91926d4509ef83a17632946a21a761a418aec4e3295efae9e2"} Nov 11 13:56:05 crc kubenswrapper[4842]: I1111 13:56:05.931473 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85","Type":"ContainerStarted","Data":"a62fdd183c53bd123d12c06aa5dbab4426fdba5fc3dc2288ad2a4e91e14fc982"} Nov 11 13:56:05 crc kubenswrapper[4842]: I1111 
13:56:05.933861 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a6225291-4a01-43af-ba67-f5281c2bd436","Type":"ContainerStarted","Data":"e64d4260842a8d444488ac74913f36eb8d5ce74399d43eeac049dfb6ed769c72"} Nov 11 13:56:05 crc kubenswrapper[4842]: I1111 13:56:05.939306 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f7sn5" event={"ID":"6ae937ce-ab8b-471f-b809-821ca6f23ecd","Type":"ContainerStarted","Data":"5e8c81ca7598d820142a5b6aff76c2940c0d4460553b5631a2e481c1a11edd13"} Nov 11 13:56:05 crc kubenswrapper[4842]: I1111 13:56:05.981780 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-f7sn5" podStartSLOduration=19.320526231 podStartE2EDuration="23.981756404s" podCreationTimestamp="2025-11-11 13:55:42 +0000 UTC" firstStartedPulling="2025-11-11 13:55:59.631280913 +0000 UTC m=+1570.291570532" lastFinishedPulling="2025-11-11 13:56:04.292511086 +0000 UTC m=+1574.952800705" observedRunningTime="2025-11-11 13:56:05.974966938 +0000 UTC m=+1576.635256567" watchObservedRunningTime="2025-11-11 13:56:05.981756404 +0000 UTC m=+1576.642046033" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.231051 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-rlvf6"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.232972 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.235438 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.249029 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rlvf6"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.337754 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-969q4\" (UniqueName: \"kubernetes.io/projected/df77e52b-398e-454a-bbf5-0bac66c17380-kube-api-access-969q4\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.339618 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df77e52b-398e-454a-bbf5-0bac66c17380-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.339656 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df77e52b-398e-454a-bbf5-0bac66c17380-combined-ca-bundle\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.339688 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df77e52b-398e-454a-bbf5-0bac66c17380-ovn-rundir\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc 
kubenswrapper[4842]: I1111 13:56:06.339767 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df77e52b-398e-454a-bbf5-0bac66c17380-config\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.339801 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df77e52b-398e-454a-bbf5-0bac66c17380-ovs-rundir\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.393302 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fc87ffc5-kls8n"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.430757 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54756c4575-zt48k"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.443008 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-969q4\" (UniqueName: \"kubernetes.io/projected/df77e52b-398e-454a-bbf5-0bac66c17380-kube-api-access-969q4\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.443056 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df77e52b-398e-454a-bbf5-0bac66c17380-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.443078 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df77e52b-398e-454a-bbf5-0bac66c17380-combined-ca-bundle\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.443260 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df77e52b-398e-454a-bbf5-0bac66c17380-ovn-rundir\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.443337 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df77e52b-398e-454a-bbf5-0bac66c17380-config\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.443368 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df77e52b-398e-454a-bbf5-0bac66c17380-ovs-rundir\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.443820 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df77e52b-398e-454a-bbf5-0bac66c17380-ovs-rundir\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.446317 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df77e52b-398e-454a-bbf5-0bac66c17380-ovn-rundir\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.448009 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df77e52b-398e-454a-bbf5-0bac66c17380-config\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.450920 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df77e52b-398e-454a-bbf5-0bac66c17380-combined-ca-bundle\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.452303 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.455029 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df77e52b-398e-454a-bbf5-0bac66c17380-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.462231 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.469042 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54756c4575-zt48k"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.472629 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-969q4\" (UniqueName: \"kubernetes.io/projected/df77e52b-398e-454a-bbf5-0bac66c17380-kube-api-access-969q4\") pod \"ovn-controller-metrics-rlvf6\" (UID: \"df77e52b-398e-454a-bbf5-0bac66c17380\") " pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.546984 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-rlvf6" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.548337 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-dns-svc\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.548418 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-config\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.548453 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4srj\" (UniqueName: \"kubernetes.io/projected/688010e9-90b2-466d-be58-659f03223a9c-kube-api-access-t4srj\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.548541 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-ovsdbserver-nb\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.556493 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f78b5c955-jpbvq"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.630643 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cfdc9455f-6kxnv"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.632520 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.637300 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.647873 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cfdc9455f-6kxnv"] Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.650031 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-config\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.650066 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4srj\" (UniqueName: \"kubernetes.io/projected/688010e9-90b2-466d-be58-659f03223a9c-kube-api-access-t4srj\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.650243 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-ovsdbserver-nb\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.650280 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-dns-svc\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.651236 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-config\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.652569 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-ovsdbserver-nb\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.652687 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-dns-svc\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.682891 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4srj\" (UniqueName: \"kubernetes.io/projected/688010e9-90b2-466d-be58-659f03223a9c-kube-api-access-t4srj\") pod \"dnsmasq-dns-54756c4575-zt48k\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.754915 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-config\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.754990 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-nb\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.755023 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsz6f\" (UniqueName: \"kubernetes.io/projected/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-kube-api-access-gsz6f\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.755056 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-sb\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.755257 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-dns-svc\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.859735 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.861304 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-config\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.861455 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-nb\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.861523 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsz6f\" (UniqueName: \"kubernetes.io/projected/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-kube-api-access-gsz6f\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.861607 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-sb\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.861695 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-dns-svc\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.862444 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-config\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.862541 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-dns-svc\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.863246 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-sb\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.863628 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-nb\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 
13:56:06.885778 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsz6f\" (UniqueName: \"kubernetes.io/projected/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-kube-api-access-gsz6f\") pod \"dnsmasq-dns-5cfdc9455f-6kxnv\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.892816 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.928357 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.962659 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-dns-svc\") pod \"54172e64-3a84-46c5-aa74-1d8465151695\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.962756 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftrd4\" (UniqueName: \"kubernetes.io/projected/54172e64-3a84-46c5-aa74-1d8465151695-kube-api-access-ftrd4\") pod \"54172e64-3a84-46c5-aa74-1d8465151695\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.963039 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-config\") pod \"54172e64-3a84-46c5-aa74-1d8465151695\" (UID: \"54172e64-3a84-46c5-aa74-1d8465151695\") " Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.963276 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "54172e64-3a84-46c5-aa74-1d8465151695" (UID: "54172e64-3a84-46c5-aa74-1d8465151695"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.963393 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.963566 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-config" (OuterVolumeSpecName: "config") pod "54172e64-3a84-46c5-aa74-1d8465151695" (UID: "54172e64-3a84-46c5-aa74-1d8465151695"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.967230 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerStarted","Data":"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25"} Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.974254 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.974301 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f78b5c955-jpbvq" event={"ID":"5a312a1c-7421-4242-ba9d-6da4e35bac28","Type":"ContainerDied","Data":"7294ac7cb1f7d4c104733b37535f6b6377e8f1aef227a68f9a3b50966817234a"} Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.982060 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cc6nz" event={"ID":"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5","Type":"ContainerStarted","Data":"ab9f9a05f33cc948bf88bc56200b321dd837e3807c358be586989f52efccf15a"} Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.996180 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.996310 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fc87ffc5-kls8n" event={"ID":"54172e64-3a84-46c5-aa74-1d8465151695","Type":"ContainerDied","Data":"8d3b62748e9065f4611766915c59652044d41a9c6a02b69967f60aa134017a51"} Nov 11 13:56:06 crc kubenswrapper[4842]: I1111 13:56:06.996336 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-f7sn5" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.010852 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.052436 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54172e64-3a84-46c5-aa74-1d8465151695-kube-api-access-ftrd4" (OuterVolumeSpecName: "kube-api-access-ftrd4") pod "54172e64-3a84-46c5-aa74-1d8465151695" (UID: "54172e64-3a84-46c5-aa74-1d8465151695"). InnerVolumeSpecName "kube-api-access-ftrd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.064701 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-dns-svc\") pod \"5a312a1c-7421-4242-ba9d-6da4e35bac28\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.064780 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-config\") pod \"5a312a1c-7421-4242-ba9d-6da4e35bac28\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.064811 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzddh\" (UniqueName: \"kubernetes.io/projected/5a312a1c-7421-4242-ba9d-6da4e35bac28-kube-api-access-rzddh\") pod \"5a312a1c-7421-4242-ba9d-6da4e35bac28\" (UID: \"5a312a1c-7421-4242-ba9d-6da4e35bac28\") " Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.065194 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5a312a1c-7421-4242-ba9d-6da4e35bac28" (UID: "5a312a1c-7421-4242-ba9d-6da4e35bac28"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.065402 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-config" (OuterVolumeSpecName: "config") pod "5a312a1c-7421-4242-ba9d-6da4e35bac28" (UID: "5a312a1c-7421-4242-ba9d-6da4e35bac28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.066277 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.066299 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a312a1c-7421-4242-ba9d-6da4e35bac28-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.066309 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54172e64-3a84-46c5-aa74-1d8465151695-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.066319 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftrd4\" (UniqueName: \"kubernetes.io/projected/54172e64-3a84-46c5-aa74-1d8465151695-kube-api-access-ftrd4\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.072009 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a312a1c-7421-4242-ba9d-6da4e35bac28-kube-api-access-rzddh" (OuterVolumeSpecName: "kube-api-access-rzddh") pod "5a312a1c-7421-4242-ba9d-6da4e35bac28" (UID: "5a312a1c-7421-4242-ba9d-6da4e35bac28"). InnerVolumeSpecName "kube-api-access-rzddh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.169524 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzddh\" (UniqueName: \"kubernetes.io/projected/5a312a1c-7421-4242-ba9d-6da4e35bac28-kube-api-access-rzddh\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.174873 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-rlvf6"] Nov 11 13:56:07 crc kubenswrapper[4842]: W1111 13:56:07.188939 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf77e52b_398e_454a_bbf5_0bac66c17380.slice/crio-2a5e1f9e557cbaa2bd1276ba443851ebb98e2a9a59a1360aa3dace46a2762dbb WatchSource:0}: Error finding container 2a5e1f9e557cbaa2bd1276ba443851ebb98e2a9a59a1360aa3dace46a2762dbb: Status 404 returned error can't find the container with id 2a5e1f9e557cbaa2bd1276ba443851ebb98e2a9a59a1360aa3dace46a2762dbb Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.353814 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f78b5c955-jpbvq"] Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.365403 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f78b5c955-jpbvq"] Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.398827 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fc87ffc5-kls8n"] Nov 11 13:56:07 crc kubenswrapper[4842]: W1111 13:56:07.407722 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod688010e9_90b2_466d_be58_659f03223a9c.slice/crio-3287ed660a885c903dbdc81df99fe21827752489a79b4e6ce65ad1437b9b99da WatchSource:0}: Error finding container 3287ed660a885c903dbdc81df99fe21827752489a79b4e6ce65ad1437b9b99da: Status 404 returned error can't find the container with id 3287ed660a885c903dbdc81df99fe21827752489a79b4e6ce65ad1437b9b99da Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.407801 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76fc87ffc5-kls8n"] Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.417920 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54756c4575-zt48k"] Nov 11 13:56:07 crc kubenswrapper[4842]: I1111 13:56:07.582449 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cfdc9455f-6kxnv"] Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.003445 4842 generic.go:334] "Generic (PLEG): container finished" podID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerID="d1e55584233ec6d4c1a3b6c925b6469b5795e4cf4ae58297e1dcdf8e68dcfd5e" exitCode=0 Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.003550 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" event={"ID":"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92","Type":"ContainerDied","Data":"d1e55584233ec6d4c1a3b6c925b6469b5795e4cf4ae58297e1dcdf8e68dcfd5e"} Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.003615 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" event={"ID":"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92","Type":"ContainerStarted","Data":"2f2091f9309eb346bc074ef9d6ffb0a1b90225f43978165d33bae80ccdcdfac4"} Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.004660 4842 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ovn-controller-metrics-rlvf6" event={"ID":"df77e52b-398e-454a-bbf5-0bac66c17380","Type":"ContainerStarted","Data":"2a5e1f9e557cbaa2bd1276ba443851ebb98e2a9a59a1360aa3dace46a2762dbb"} Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.008238 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cc6nz" event={"ID":"e04e103c-eb86-4e27-b5ac-0d4faf32d1f5","Type":"ContainerStarted","Data":"1dc8bbbce8842a9f5b4878d15b8affc6f92c401c9ea1a5651c9d190b9794f139"} Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.008365 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.009996 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"121e4ffa-c7c1-40ef-a668-500b2cc8fba6","Type":"ContainerStarted","Data":"80f8963a1103cec4985aa1bf4d5152b260a24f6f08e57cf58bea4fbb7bc5a858"} Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.011710 4842 generic.go:334] "Generic (PLEG): container finished" podID="688010e9-90b2-466d-be58-659f03223a9c" containerID="f8670d1a5be72315e6691b16cde4e1b51785b0bec1870752cd3c72cb16cea98d" exitCode=0 Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.012562 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54756c4575-zt48k" event={"ID":"688010e9-90b2-466d-be58-659f03223a9c","Type":"ContainerDied","Data":"f8670d1a5be72315e6691b16cde4e1b51785b0bec1870752cd3c72cb16cea98d"} Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.012587 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54756c4575-zt48k" event={"ID":"688010e9-90b2-466d-be58-659f03223a9c","Type":"ContainerStarted","Data":"3287ed660a885c903dbdc81df99fe21827752489a79b4e6ce65ad1437b9b99da"} Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.040621 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-cc6nz" podStartSLOduration=21.490409027 podStartE2EDuration="26.040607346s" podCreationTimestamp="2025-11-11 13:55:42 +0000 UTC" firstStartedPulling="2025-11-11 13:55:59.701329497 +0000 UTC m=+1570.361619116" lastFinishedPulling="2025-11-11 13:56:04.251527816 +0000 UTC m=+1574.911817435" observedRunningTime="2025-11-11 13:56:08.036713452 +0000 UTC m=+1578.697003071" watchObservedRunningTime="2025-11-11 13:56:08.040607346 +0000 UTC m=+1578.700896965" Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.071328 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54172e64-3a84-46c5-aa74-1d8465151695" path="/var/lib/kubelet/pods/54172e64-3a84-46c5-aa74-1d8465151695/volumes" Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.071744 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a312a1c-7421-4242-ba9d-6da4e35bac28" path="/var/lib/kubelet/pods/5a312a1c-7421-4242-ba9d-6da4e35bac28/volumes" Nov 11 13:56:08 crc kubenswrapper[4842]: I1111 13:56:08.252719 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:56:09 crc kubenswrapper[4842]: I1111 13:56:09.525796 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.029785 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" 
event={"ID":"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92","Type":"ContainerStarted","Data":"e88a391af281c51117a74cfcd095b45d620a3c336944efa218b23ae5025728ba"} Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.030084 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.031402 4842 generic.go:334] "Generic (PLEG): container finished" podID="c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca" containerID="4290e935e0991967383153c4789ed17c1be66f5da1b92af1922380d647933c3c" exitCode=0 Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.031461 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca","Type":"ContainerDied","Data":"4290e935e0991967383153c4789ed17c1be66f5da1b92af1922380d647933c3c"} Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.033001 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-rlvf6" event={"ID":"df77e52b-398e-454a-bbf5-0bac66c17380","Type":"ContainerStarted","Data":"fa1138bff76afa0cb68cb0df8eb60b891cc8216111a6f80a0995b8f45b8b460e"} Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.041731 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"9f712abc-0d24-4fc2-a103-c102a8833466","Type":"ContainerStarted","Data":"134feb629a773195aebdae7430df23991cb19efe61e6650f55a74e2e501f45bd"} Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.044041 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85","Type":"ContainerStarted","Data":"0e15db6c619a3c57dd01565dca58b063b5dbfa8fbf4b8579262e8a1a3dc27117"} Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.046227 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54756c4575-zt48k" event={"ID":"688010e9-90b2-466d-be58-659f03223a9c","Type":"ContainerStarted","Data":"9bb200adfbac6cc8dd3e75785b854158f5246441b3d937161b4ef7eeea77d06e"} Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.046676 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.048012 4842 generic.go:334] "Generic (PLEG): container finished" podID="a6225291-4a01-43af-ba67-f5281c2bd436" containerID="e64d4260842a8d444488ac74913f36eb8d5ce74399d43eeac049dfb6ed769c72" exitCode=0 Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.048056 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a6225291-4a01-43af-ba67-f5281c2bd436","Type":"ContainerDied","Data":"e64d4260842a8d444488ac74913f36eb8d5ce74399d43eeac049dfb6ed769c72"} Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.057640 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" podStartSLOduration=4.057621101 podStartE2EDuration="4.057621101s" podCreationTimestamp="2025-11-11 13:56:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:56:10.053228671 +0000 UTC m=+1580.713518300" watchObservedRunningTime="2025-11-11 13:56:10.057621101 +0000 UTC m=+1580.717910740" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.078987 4842 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=20.195663086 podStartE2EDuration="29.078966909s" podCreationTimestamp="2025-11-11 13:55:41 +0000 UTC" firstStartedPulling="2025-11-11 13:56:00.534478121 +0000 UTC m=+1571.194767740" lastFinishedPulling="2025-11-11 13:56:09.417781944 +0000 UTC m=+1580.078071563" observedRunningTime="2025-11-11 13:56:10.076554402 +0000 UTC m=+1580.736844041" watchObservedRunningTime="2025-11-11 13:56:10.078966909 +0000 UTC m=+1580.739256528" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.102446 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.104470 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-rlvf6" podStartSLOduration=1.880549395 podStartE2EDuration="4.104456018s" podCreationTimestamp="2025-11-11 13:56:06 +0000 UTC" firstStartedPulling="2025-11-11 13:56:07.193780338 +0000 UTC m=+1577.854069957" lastFinishedPulling="2025-11-11 13:56:09.417686961 +0000 UTC m=+1580.077976580" observedRunningTime="2025-11-11 13:56:10.095680689 +0000 UTC m=+1580.755970308" watchObservedRunningTime="2025-11-11 13:56:10.104456018 +0000 UTC m=+1580.764745647" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.154311 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=15.368228463 podStartE2EDuration="25.15429033s" podCreationTimestamp="2025-11-11 13:55:45 +0000 UTC" firstStartedPulling="2025-11-11 13:55:59.637232192 +0000 UTC m=+1570.297521811" lastFinishedPulling="2025-11-11 13:56:09.423294059 +0000 UTC m=+1580.083583678" observedRunningTime="2025-11-11 13:56:10.147209496 +0000 UTC m=+1580.807499115" watchObservedRunningTime="2025-11-11 13:56:10.15429033 +0000 UTC m=+1580.814579949" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.159042 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.199581 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54756c4575-zt48k" podStartSLOduration=4.138156809 podStartE2EDuration="4.199557768s" podCreationTimestamp="2025-11-11 13:56:06 +0000 UTC" firstStartedPulling="2025-11-11 13:56:07.415659993 +0000 UTC m=+1578.075949612" lastFinishedPulling="2025-11-11 13:56:07.477060952 +0000 UTC m=+1578.137350571" observedRunningTime="2025-11-11 13:56:10.196224492 +0000 UTC m=+1580.856514131" watchObservedRunningTime="2025-11-11 13:56:10.199557768 +0000 UTC m=+1580.859847387" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.887471 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 11 13:56:10 crc kubenswrapper[4842]: I1111 13:56:10.929156 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.061418 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"dd8c2bc7-52e3-408c-8a72-3e5978b30a42","Type":"ContainerStarted","Data":"b7c2238f1340635d5115dd9fe56f38cf43d59742baa12c7b6cb17a3e757a55bb"} Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.063684 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"a6225291-4a01-43af-ba67-f5281c2bd436","Type":"ContainerStarted","Data":"4924d95a0905360888967a7abb3149d57e0d8700b0eae4a48e2a8ffee16bf6eb"} Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.066529 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca","Type":"ContainerStarted","Data":"626636be3738774abcbb335defa7ac7ba63c8e27e663fb7e855c11419a9a39b3"} Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.067371 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.067409 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.107877 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.109682 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.128540 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=31.470739259 podStartE2EDuration="36.128516994s" podCreationTimestamp="2025-11-11 13:55:35 +0000 UTC" firstStartedPulling="2025-11-11 13:55:59.632966016 +0000 UTC m=+1570.293255645" lastFinishedPulling="2025-11-11 13:56:04.290743761 +0000 UTC m=+1574.951033380" observedRunningTime="2025-11-11 13:56:11.107513528 +0000 UTC m=+1581.767803147" watchObservedRunningTime="2025-11-11 13:56:11.128516994 +0000 UTC m=+1581.788806613" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.132083 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=32.400339265 podStartE2EDuration="37.132069877s" podCreationTimestamp="2025-11-11 13:55:34 +0000 UTC" firstStartedPulling="2025-11-11 13:55:59.562823859 +0000 UTC m=+1570.223113468" lastFinishedPulling="2025-11-11 13:56:04.294554471 +0000 UTC m=+1574.954844080" observedRunningTime="2025-11-11 13:56:11.125824109 +0000 UTC m=+1581.786113728" watchObservedRunningTime="2025-11-11 13:56:11.132069877 +0000 UTC m=+1581.792359486" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.352752 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.355206 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.358861 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.359260 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.359482 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.361029 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-fvrvm" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.367016 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.445941 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.446034 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.446073 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.446093 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-scripts\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.446130 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-config\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.446190 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqgtt\" (UniqueName: \"kubernetes.io/projected/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-kube-api-access-bqgtt\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.446214 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: 
I1111 13:56:11.547206 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqgtt\" (UniqueName: \"kubernetes.io/projected/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-kube-api-access-bqgtt\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.547259 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.547350 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.547403 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.547431 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.547457 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-scripts\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.547480 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-config\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.547941 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.548499 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-config\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.548955 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-scripts\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.553789 4842 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.554594 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.555281 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.569318 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqgtt\" (UniqueName: \"kubernetes.io/projected/e0df8170-d8dd-4ad1-9a30-d60e06fa07f7-kube-api-access-bqgtt\") pod \"ovn-northd-0\" (UID: \"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7\") " pod="openstack/ovn-northd-0" Nov 11 13:56:11 crc kubenswrapper[4842]: I1111 13:56:11.683420 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 11 13:56:12 crc kubenswrapper[4842]: I1111 13:56:12.075418 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13087f6b-10cb-421a-b695-84006a81506f","Type":"ContainerStarted","Data":"8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf"} Nov 11 13:56:12 crc kubenswrapper[4842]: W1111 13:56:12.159383 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0df8170_d8dd_4ad1_9a30_d60e06fa07f7.slice/crio-37ee3310a64259ed4f0b4167c06532201a9771f1c454ff146884371c4d37caf2 WatchSource:0}: Error finding container 37ee3310a64259ed4f0b4167c06532201a9771f1c454ff146884371c4d37caf2: Status 404 returned error can't find the container with id 37ee3310a64259ed4f0b4167c06532201a9771f1c454ff146884371c4d37caf2 Nov 11 13:56:12 crc kubenswrapper[4842]: I1111 13:56:12.166415 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 11 13:56:12 crc kubenswrapper[4842]: I1111 13:56:12.216887 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 11 13:56:13 crc kubenswrapper[4842]: I1111 13:56:13.082654 4842 generic.go:334] "Generic (PLEG): container finished" podID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerID="56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25" exitCode=0 Nov 11 13:56:13 crc kubenswrapper[4842]: I1111 13:56:13.082730 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerDied","Data":"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25"} Nov 11 13:56:13 crc kubenswrapper[4842]: I1111 13:56:13.087815 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7","Type":"ContainerStarted","Data":"3baaeb68043666fdb87c8db0d518948075305d1d0e9fd7ce6a3a3aeedaaf1c5c"} Nov 11 13:56:13 crc kubenswrapper[4842]: I1111 13:56:13.087880 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7","Type":"ContainerStarted","Data":"37ee3310a64259ed4f0b4167c06532201a9771f1c454ff146884371c4d37caf2"} Nov 11 13:56:14 crc kubenswrapper[4842]: I1111 13:56:14.098314 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e0df8170-d8dd-4ad1-9a30-d60e06fa07f7","Type":"ContainerStarted","Data":"ef9eb04f92bd15876b66d54ac0fef3f913e202e43a4e31eb70d3d9a8038e08c6"} Nov 11 13:56:14 crc kubenswrapper[4842]: I1111 13:56:14.098712 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 11 13:56:14 crc kubenswrapper[4842]: I1111 13:56:14.125434 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.584659393 podStartE2EDuration="3.125417293s" podCreationTimestamp="2025-11-11 13:56:11 +0000 UTC" firstStartedPulling="2025-11-11 13:56:12.16107677 +0000 UTC m=+1582.821366389" lastFinishedPulling="2025-11-11 13:56:12.70183468 +0000 UTC m=+1583.362124289" observedRunningTime="2025-11-11 13:56:14.117597624 +0000 UTC m=+1584.777887253" watchObservedRunningTime="2025-11-11 13:56:14.125417293 +0000 UTC m=+1584.785706912" Nov 11 13:56:16 crc kubenswrapper[4842]: I1111 13:56:16.057334 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 11 13:56:16 crc kubenswrapper[4842]: I1111 13:56:16.057608 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 11 13:56:16 crc kubenswrapper[4842]: I1111 13:56:16.861751 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:17 crc kubenswrapper[4842]: I1111 13:56:17.013360 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:17 crc kubenswrapper[4842]: I1111 13:56:17.070617 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54756c4575-zt48k"] Nov 11 13:56:17 crc kubenswrapper[4842]: I1111 13:56:17.125542 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54756c4575-zt48k" podUID="688010e9-90b2-466d-be58-659f03223a9c" containerName="dnsmasq-dns" containerID="cri-o://9bb200adfbac6cc8dd3e75785b854158f5246441b3d937161b4ef7eeea77d06e" gracePeriod=10 Nov 11 13:56:17 crc kubenswrapper[4842]: I1111 13:56:17.247801 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 11 13:56:17 crc kubenswrapper[4842]: I1111 13:56:17.247855 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 11 13:56:17 crc kubenswrapper[4842]: I1111 13:56:17.317122 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.141028 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.160866 4842 generic.go:334] "Generic (PLEG): 
container finished" podID="688010e9-90b2-466d-be58-659f03223a9c" containerID="9bb200adfbac6cc8dd3e75785b854158f5246441b3d937161b4ef7eeea77d06e" exitCode=0 Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.161804 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54756c4575-zt48k" event={"ID":"688010e9-90b2-466d-be58-659f03223a9c","Type":"ContainerDied","Data":"9bb200adfbac6cc8dd3e75785b854158f5246441b3d937161b4ef7eeea77d06e"} Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.243565 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.244899 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.288595 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.371944 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4srj\" (UniqueName: \"kubernetes.io/projected/688010e9-90b2-466d-be58-659f03223a9c-kube-api-access-t4srj\") pod \"688010e9-90b2-466d-be58-659f03223a9c\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.372035 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-ovsdbserver-nb\") pod \"688010e9-90b2-466d-be58-659f03223a9c\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.372232 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-config\") pod \"688010e9-90b2-466d-be58-659f03223a9c\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.372277 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-dns-svc\") pod \"688010e9-90b2-466d-be58-659f03223a9c\" (UID: \"688010e9-90b2-466d-be58-659f03223a9c\") " Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.380819 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/688010e9-90b2-466d-be58-659f03223a9c-kube-api-access-t4srj" (OuterVolumeSpecName: "kube-api-access-t4srj") pod "688010e9-90b2-466d-be58-659f03223a9c" (UID: "688010e9-90b2-466d-be58-659f03223a9c"). InnerVolumeSpecName "kube-api-access-t4srj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.418649 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-config" (OuterVolumeSpecName: "config") pod "688010e9-90b2-466d-be58-659f03223a9c" (UID: "688010e9-90b2-466d-be58-659f03223a9c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.433080 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "688010e9-90b2-466d-be58-659f03223a9c" (UID: "688010e9-90b2-466d-be58-659f03223a9c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.443305 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "688010e9-90b2-466d-be58-659f03223a9c" (UID: "688010e9-90b2-466d-be58-659f03223a9c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.473748 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4srj\" (UniqueName: \"kubernetes.io/projected/688010e9-90b2-466d-be58-659f03223a9c-kube-api-access-t4srj\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.473778 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.473788 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:18 crc kubenswrapper[4842]: I1111 13:56:18.473797 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/688010e9-90b2-466d-be58-659f03223a9c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.171520 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerStarted","Data":"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d"} Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.173485 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54756c4575-zt48k" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.173533 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54756c4575-zt48k" event={"ID":"688010e9-90b2-466d-be58-659f03223a9c","Type":"ContainerDied","Data":"3287ed660a885c903dbdc81df99fe21827752489a79b4e6ce65ad1437b9b99da"} Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.173568 4842 scope.go:117] "RemoveContainer" containerID="9bb200adfbac6cc8dd3e75785b854158f5246441b3d937161b4ef7eeea77d06e" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.190471 4842 scope.go:117] "RemoveContainer" containerID="f8670d1a5be72315e6691b16cde4e1b51785b0bec1870752cd3c72cb16cea98d" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.209373 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54756c4575-zt48k"] Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.215623 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54756c4575-zt48k"] Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.543291 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-788ccdd4d5-nwmgv"] Nov 11 13:56:19 crc kubenswrapper[4842]: E1111 13:56:19.543642 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="688010e9-90b2-466d-be58-659f03223a9c" containerName="init" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.543659 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="688010e9-90b2-466d-be58-659f03223a9c" containerName="init" Nov 11 13:56:19 crc kubenswrapper[4842]: E1111 13:56:19.543673 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="688010e9-90b2-466d-be58-659f03223a9c" containerName="dnsmasq-dns" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.543678 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="688010e9-90b2-466d-be58-659f03223a9c" containerName="dnsmasq-dns" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.543848 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="688010e9-90b2-466d-be58-659f03223a9c" containerName="dnsmasq-dns" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.544738 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.572038 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-788ccdd4d5-nwmgv"] Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.693516 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z2m7\" (UniqueName: \"kubernetes.io/projected/36131c9c-3736-4825-85bc-27645ca80178-kube-api-access-7z2m7\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.693627 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-nb\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.693693 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-sb\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.693748 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-config\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.693874 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-dns-svc\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.703378 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-kgfbg"] Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.705033 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-kgfbg" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.728465 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-kgfbg"] Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.795928 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d7fs\" (UniqueName: \"kubernetes.io/projected/c6b01f49-6afc-4a28-8564-46a9cd65db71-kube-api-access-7d7fs\") pod \"watcher-db-create-kgfbg\" (UID: \"c6b01f49-6afc-4a28-8564-46a9cd65db71\") " pod="openstack/watcher-db-create-kgfbg" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.796004 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z2m7\" (UniqueName: \"kubernetes.io/projected/36131c9c-3736-4825-85bc-27645ca80178-kube-api-access-7z2m7\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.796047 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-nb\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.796091 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-sb\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.796145 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-config\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.796192 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-dns-svc\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.797356 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-sb\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.797400 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-dns-svc\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.797600 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-nb\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.797630 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-config\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.849439 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z2m7\" (UniqueName: \"kubernetes.io/projected/36131c9c-3736-4825-85bc-27645ca80178-kube-api-access-7z2m7\") pod \"dnsmasq-dns-788ccdd4d5-nwmgv\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.866714 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.897557 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d7fs\" (UniqueName: \"kubernetes.io/projected/c6b01f49-6afc-4a28-8564-46a9cd65db71-kube-api-access-7d7fs\") pod \"watcher-db-create-kgfbg\" (UID: \"c6b01f49-6afc-4a28-8564-46a9cd65db71\") " pod="openstack/watcher-db-create-kgfbg" Nov 11 13:56:19 crc kubenswrapper[4842]: I1111 13:56:19.951708 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d7fs\" (UniqueName: \"kubernetes.io/projected/c6b01f49-6afc-4a28-8564-46a9cd65db71-kube-api-access-7d7fs\") pod \"watcher-db-create-kgfbg\" (UID: \"c6b01f49-6afc-4a28-8564-46a9cd65db71\") " pod="openstack/watcher-db-create-kgfbg" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.021994 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-kgfbg" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.072959 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="688010e9-90b2-466d-be58-659f03223a9c" path="/var/lib/kubelet/pods/688010e9-90b2-466d-be58-659f03223a9c/volumes" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.532012 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-kgfbg"] Nov 11 13:56:20 crc kubenswrapper[4842]: W1111 13:56:20.556384 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6b01f49_6afc_4a28_8564_46a9cd65db71.slice/crio-393ff697a5543ecd9d438eb67af955c06691c1d288429131799bbbc767aa6ff2 WatchSource:0}: Error finding container 393ff697a5543ecd9d438eb67af955c06691c1d288429131799bbbc767aa6ff2: Status 404 returned error can't find the container with id 393ff697a5543ecd9d438eb67af955c06691c1d288429131799bbbc767aa6ff2 Nov 11 13:56:20 crc kubenswrapper[4842]: W1111 13:56:20.625215 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36131c9c_3736_4825_85bc_27645ca80178.slice/crio-6b5caf79d385e518c1e40dd96fbaa4514b8941b47ae670fb2afe9735af43bcc0 WatchSource:0}: Error finding container 6b5caf79d385e518c1e40dd96fbaa4514b8941b47ae670fb2afe9735af43bcc0: Status 404 returned error can't find the container with id 6b5caf79d385e518c1e40dd96fbaa4514b8941b47ae670fb2afe9735af43bcc0 Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.627032 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-788ccdd4d5-nwmgv"] Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.702710 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.712445 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.713387 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.715326 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-nwn8f" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.715535 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.715709 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.715878 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.814478 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2a1e5076-485c-4759-ba37-33e161741f74-lock\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.814555 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.814604 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2a1e5076-485c-4759-ba37-33e161741f74-cache\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.814824 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2qtk\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-kube-api-access-n2qtk\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.814885 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.917266 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.917325 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2a1e5076-485c-4759-ba37-33e161741f74-lock\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.917379 4842 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.917427 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2a1e5076-485c-4759-ba37-33e161741f74-cache\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: E1111 13:56:20.917450 4842 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 11 13:56:20 crc kubenswrapper[4842]: E1111 13:56:20.917471 4842 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.917499 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2qtk\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-kube-api-access-n2qtk\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: E1111 13:56:20.917545 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift podName:2a1e5076-485c-4759-ba37-33e161741f74 nodeName:}" failed. No retries permitted until 2025-11-11 13:56:21.417524545 +0000 UTC m=+1592.077814164 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift") pod "swift-storage-0" (UID: "2a1e5076-485c-4759-ba37-33e161741f74") : configmap "swift-ring-files" not found Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.917833 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.917935 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2a1e5076-485c-4759-ba37-33e161741f74-cache\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.926676 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2a1e5076-485c-4759-ba37-33e161741f74-lock\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.943945 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2qtk\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-kube-api-access-n2qtk\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:20 crc kubenswrapper[4842]: I1111 13:56:20.947075 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.194505 4842 generic.go:334] "Generic (PLEG): container finished" podID="c6b01f49-6afc-4a28-8564-46a9cd65db71" containerID="fff140870778287630c5082bed1b692fe99fec4a4c250e3ac13bb0e7e66fa9f7" exitCode=0 Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.194626 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-kgfbg" event={"ID":"c6b01f49-6afc-4a28-8564-46a9cd65db71","Type":"ContainerDied","Data":"fff140870778287630c5082bed1b692fe99fec4a4c250e3ac13bb0e7e66fa9f7"} Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.194670 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-kgfbg" event={"ID":"c6b01f49-6afc-4a28-8564-46a9cd65db71","Type":"ContainerStarted","Data":"393ff697a5543ecd9d438eb67af955c06691c1d288429131799bbbc767aa6ff2"} Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.195956 4842 generic.go:334] "Generic (PLEG): container finished" podID="36131c9c-3736-4825-85bc-27645ca80178" containerID="c5ce13bc413677b767e22ec75fc3d7ca4a379461549148979323f837b0a4ac6d" exitCode=0 Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.196021 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" event={"ID":"36131c9c-3736-4825-85bc-27645ca80178","Type":"ContainerDied","Data":"c5ce13bc413677b767e22ec75fc3d7ca4a379461549148979323f837b0a4ac6d"} Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.196046 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" event={"ID":"36131c9c-3736-4825-85bc-27645ca80178","Type":"ContainerStarted","Data":"6b5caf79d385e518c1e40dd96fbaa4514b8941b47ae670fb2afe9735af43bcc0"} Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.198021 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerStarted","Data":"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f"} Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.329212 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-z5kzq"] Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.332481 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.334531 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.334555 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.334628 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.335615 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-z5kzq"] Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425372 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrdd2\" (UniqueName: \"kubernetes.io/projected/8b12adcf-9678-4493-8035-061dcdf98b6e-kube-api-access-lrdd2\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425419 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-ring-data-devices\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425478 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-dispersionconf\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425522 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8b12adcf-9678-4493-8035-061dcdf98b6e-etc-swift\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425562 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-scripts\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425618 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-swiftconf\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425642 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-combined-ca-bundle\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 
13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.425676 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:21 crc kubenswrapper[4842]: E1111 13:56:21.425944 4842 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 11 13:56:21 crc kubenswrapper[4842]: E1111 13:56:21.425966 4842 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 11 13:56:21 crc kubenswrapper[4842]: E1111 13:56:21.426020 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift podName:2a1e5076-485c-4759-ba37-33e161741f74 nodeName:}" failed. No retries permitted until 2025-11-11 13:56:22.426001151 +0000 UTC m=+1593.086290770 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift") pod "swift-storage-0" (UID: "2a1e5076-485c-4759-ba37-33e161741f74") : configmap "swift-ring-files" not found Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.527725 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-dispersionconf\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.528605 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8b12adcf-9678-4493-8035-061dcdf98b6e-etc-swift\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.528657 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-scripts\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.528729 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-swiftconf\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.528763 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-combined-ca-bundle\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.528864 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrdd2\" (UniqueName: \"kubernetes.io/projected/8b12adcf-9678-4493-8035-061dcdf98b6e-kube-api-access-lrdd2\") pod \"swift-ring-rebalance-z5kzq\" 
(UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.528895 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-ring-data-devices\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.529524 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8b12adcf-9678-4493-8035-061dcdf98b6e-etc-swift\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.529699 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-ring-data-devices\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.531799 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-dispersionconf\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.531836 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-scripts\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.533589 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-swiftconf\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.538783 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-combined-ca-bundle\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.553656 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrdd2\" (UniqueName: \"kubernetes.io/projected/8b12adcf-9678-4493-8035-061dcdf98b6e-kube-api-access-lrdd2\") pod \"swift-ring-rebalance-z5kzq\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:21 crc kubenswrapper[4842]: I1111 13:56:21.700068 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.134533 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-z5kzq"] Nov 11 13:56:22 crc kubenswrapper[4842]: W1111 13:56:22.145762 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b12adcf_9678_4493_8035_061dcdf98b6e.slice/crio-2b87ac9a199c438aa9dd50f069f945631301dbfb67a19f5d5538e82f8f173f95 WatchSource:0}: Error finding container 2b87ac9a199c438aa9dd50f069f945631301dbfb67a19f5d5538e82f8f173f95: Status 404 returned error can't find the container with id 2b87ac9a199c438aa9dd50f069f945631301dbfb67a19f5d5538e82f8f173f95 Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.209045 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-z5kzq" event={"ID":"8b12adcf-9678-4493-8035-061dcdf98b6e","Type":"ContainerStarted","Data":"2b87ac9a199c438aa9dd50f069f945631301dbfb67a19f5d5538e82f8f173f95"} Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.235093 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" event={"ID":"36131c9c-3736-4825-85bc-27645ca80178","Type":"ContainerStarted","Data":"85e8393e9569a57963eb62a7fc2789cc78f00ec82ee42f10daa4218d20ab55a1"} Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.451445 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:22 crc kubenswrapper[4842]: E1111 13:56:22.451682 4842 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 11 13:56:22 crc kubenswrapper[4842]: E1111 13:56:22.451715 4842 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 11 13:56:22 crc kubenswrapper[4842]: E1111 13:56:22.451787 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift podName:2a1e5076-485c-4759-ba37-33e161741f74 nodeName:}" failed. No retries permitted until 2025-11-11 13:56:24.45175686 +0000 UTC m=+1595.112046479 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift") pod "swift-storage-0" (UID: "2a1e5076-485c-4759-ba37-33e161741f74") : configmap "swift-ring-files" not found Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.642094 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-kgfbg" Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.667320 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" podStartSLOduration=3.6672961539999998 podStartE2EDuration="3.667296154s" podCreationTimestamp="2025-11-11 13:56:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:56:22.265114174 +0000 UTC m=+1592.925403813" watchObservedRunningTime="2025-11-11 13:56:22.667296154 +0000 UTC m=+1593.327585783" Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.757163 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d7fs\" (UniqueName: \"kubernetes.io/projected/c6b01f49-6afc-4a28-8564-46a9cd65db71-kube-api-access-7d7fs\") pod \"c6b01f49-6afc-4a28-8564-46a9cd65db71\" (UID: \"c6b01f49-6afc-4a28-8564-46a9cd65db71\") " Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.763287 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6b01f49-6afc-4a28-8564-46a9cd65db71-kube-api-access-7d7fs" (OuterVolumeSpecName: "kube-api-access-7d7fs") pod "c6b01f49-6afc-4a28-8564-46a9cd65db71" (UID: "c6b01f49-6afc-4a28-8564-46a9cd65db71"). InnerVolumeSpecName "kube-api-access-7d7fs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:22 crc kubenswrapper[4842]: I1111 13:56:22.859405 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d7fs\" (UniqueName: \"kubernetes.io/projected/c6b01f49-6afc-4a28-8564-46a9cd65db71-kube-api-access-7d7fs\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:23 crc kubenswrapper[4842]: I1111 13:56:23.261858 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-kgfbg" Nov 11 13:56:23 crc kubenswrapper[4842]: I1111 13:56:23.262048 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-kgfbg" event={"ID":"c6b01f49-6afc-4a28-8564-46a9cd65db71","Type":"ContainerDied","Data":"393ff697a5543ecd9d438eb67af955c06691c1d288429131799bbbc767aa6ff2"} Nov 11 13:56:23 crc kubenswrapper[4842]: I1111 13:56:23.262655 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="393ff697a5543ecd9d438eb67af955c06691c1d288429131799bbbc767aa6ff2" Nov 11 13:56:23 crc kubenswrapper[4842]: I1111 13:56:23.262678 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:24 crc kubenswrapper[4842]: I1111 13:56:24.497401 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:24 crc kubenswrapper[4842]: E1111 13:56:24.497579 4842 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 11 13:56:24 crc kubenswrapper[4842]: E1111 13:56:24.497612 4842 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 11 13:56:24 crc kubenswrapper[4842]: E1111 13:56:24.497677 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift podName:2a1e5076-485c-4759-ba37-33e161741f74 nodeName:}" failed. No retries permitted until 2025-11-11 13:56:28.497656953 +0000 UTC m=+1599.157946572 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift") pod "swift-storage-0" (UID: "2a1e5076-485c-4759-ba37-33e161741f74") : configmap "swift-ring-files" not found Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.773844 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-k2gmm"] Nov 11 13:56:26 crc kubenswrapper[4842]: E1111 13:56:26.774443 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6b01f49-6afc-4a28-8564-46a9cd65db71" containerName="mariadb-database-create" Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.774455 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6b01f49-6afc-4a28-8564-46a9cd65db71" containerName="mariadb-database-create" Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.774611 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6b01f49-6afc-4a28-8564-46a9cd65db71" containerName="mariadb-database-create" Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.775299 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-k2gmm" Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.781339 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-k2gmm"] Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.836833 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kszrj\" (UniqueName: \"kubernetes.io/projected/67ae8176-e61d-426c-bb60-286f00de14e3-kube-api-access-kszrj\") pod \"keystone-db-create-k2gmm\" (UID: \"67ae8176-e61d-426c-bb60-286f00de14e3\") " pod="openstack/keystone-db-create-k2gmm" Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.938025 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kszrj\" (UniqueName: \"kubernetes.io/projected/67ae8176-e61d-426c-bb60-286f00de14e3-kube-api-access-kszrj\") pod \"keystone-db-create-k2gmm\" (UID: \"67ae8176-e61d-426c-bb60-286f00de14e3\") " pod="openstack/keystone-db-create-k2gmm" Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.969333 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kszrj\" (UniqueName: \"kubernetes.io/projected/67ae8176-e61d-426c-bb60-286f00de14e3-kube-api-access-kszrj\") pod \"keystone-db-create-k2gmm\" (UID: \"67ae8176-e61d-426c-bb60-286f00de14e3\") " pod="openstack/keystone-db-create-k2gmm" Nov 11 13:56:26 crc kubenswrapper[4842]: I1111 13:56:26.979169 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.094020 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k2gmm" Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.122281 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-bkbt8"] Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.123284 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-bkbt8" Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.130278 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-bkbt8"] Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.242738 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhvct\" (UniqueName: \"kubernetes.io/projected/d57c4421-985b-449d-a007-c05c9ab3e434-kube-api-access-xhvct\") pod \"placement-db-create-bkbt8\" (UID: \"d57c4421-985b-449d-a007-c05c9ab3e434\") " pod="openstack/placement-db-create-bkbt8" Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.343931 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhvct\" (UniqueName: \"kubernetes.io/projected/d57c4421-985b-449d-a007-c05c9ab3e434-kube-api-access-xhvct\") pod \"placement-db-create-bkbt8\" (UID: \"d57c4421-985b-449d-a007-c05c9ab3e434\") " pod="openstack/placement-db-create-bkbt8" Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.358308 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhvct\" (UniqueName: \"kubernetes.io/projected/d57c4421-985b-449d-a007-c05c9ab3e434-kube-api-access-xhvct\") pod \"placement-db-create-bkbt8\" (UID: \"d57c4421-985b-449d-a007-c05c9ab3e434\") " pod="openstack/placement-db-create-bkbt8" Nov 11 13:56:27 crc kubenswrapper[4842]: I1111 13:56:27.444943 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bkbt8" Nov 11 13:56:28 crc kubenswrapper[4842]: I1111 13:56:28.563861 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:28 crc kubenswrapper[4842]: E1111 13:56:28.564060 4842 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 11 13:56:28 crc kubenswrapper[4842]: E1111 13:56:28.564318 4842 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 11 13:56:28 crc kubenswrapper[4842]: E1111 13:56:28.564382 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift podName:2a1e5076-485c-4759-ba37-33e161741f74 nodeName:}" failed. No retries permitted until 2025-11-11 13:56:36.564363549 +0000 UTC m=+1607.224653168 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift") pod "swift-storage-0" (UID: "2a1e5076-485c-4759-ba37-33e161741f74") : configmap "swift-ring-files" not found Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.755037 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-53b5-account-create-pptc8"] Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.756453 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-53b5-account-create-pptc8" Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.758829 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.763907 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-53b5-account-create-pptc8"] Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.783225 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q56hk\" (UniqueName: \"kubernetes.io/projected/414c83eb-da9d-454d-bf91-d577ca5b195b-kube-api-access-q56hk\") pod \"watcher-53b5-account-create-pptc8\" (UID: \"414c83eb-da9d-454d-bf91-d577ca5b195b\") " pod="openstack/watcher-53b5-account-create-pptc8" Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.868425 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.885437 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q56hk\" (UniqueName: \"kubernetes.io/projected/414c83eb-da9d-454d-bf91-d577ca5b195b-kube-api-access-q56hk\") pod \"watcher-53b5-account-create-pptc8\" (UID: \"414c83eb-da9d-454d-bf91-d577ca5b195b\") " pod="openstack/watcher-53b5-account-create-pptc8" Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.915841 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cfdc9455f-6kxnv"] Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.916074 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerName="dnsmasq-dns" containerID="cri-o://e88a391af281c51117a74cfcd095b45d620a3c336944efa218b23ae5025728ba" gracePeriod=10 Nov 11 13:56:29 crc kubenswrapper[4842]: I1111 13:56:29.927085 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q56hk\" (UniqueName: \"kubernetes.io/projected/414c83eb-da9d-454d-bf91-d577ca5b195b-kube-api-access-q56hk\") pod \"watcher-53b5-account-create-pptc8\" (UID: \"414c83eb-da9d-454d-bf91-d577ca5b195b\") " pod="openstack/watcher-53b5-account-create-pptc8" Nov 11 13:56:30 crc kubenswrapper[4842]: I1111 13:56:30.082785 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-53b5-account-create-pptc8" Nov 11 13:56:30 crc kubenswrapper[4842]: I1111 13:56:30.954584 4842 generic.go:334] "Generic (PLEG): container finished" podID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerID="e88a391af281c51117a74cfcd095b45d620a3c336944efa218b23ae5025728ba" exitCode=0 Nov 11 13:56:30 crc kubenswrapper[4842]: I1111 13:56:30.954658 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" event={"ID":"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92","Type":"ContainerDied","Data":"e88a391af281c51117a74cfcd095b45d620a3c336944efa218b23ae5025728ba"} Nov 11 13:56:32 crc kubenswrapper[4842]: I1111 13:56:32.012708 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Nov 11 13:56:32 crc kubenswrapper[4842]: E1111 13:56:32.972973 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8" Nov 11 13:56:32 crc kubenswrapper[4842]: E1111 13:56:32.973432 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:thanos-sidecar,Image:registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8,Command:[],Args:[sidecar --prometheus.url=http://localhost:9090/ --grpc-address=:10901 --http-address=:10902 --log.level=info --prometheus.http-client-file=/etc/thanos/config/prometheus.http-client-file.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http,HostPort:0,ContainerPort:10902,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:10901,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:thanos-prometheus-http-client-file,ReadOnly:false,MountPath:/etc/thanos/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-px57j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(218296f7-79b4-47ed-93e7-e0cac5ee935d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:56:32 crc kubenswrapper[4842]: E1111 13:56:32.974684 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"thanos-sidecar\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.405550 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.453268 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-sb\") pod \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.453494 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-nb\") pod \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.453542 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-dns-svc\") pod \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.453584 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsz6f\" (UniqueName: \"kubernetes.io/projected/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-kube-api-access-gsz6f\") pod \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.453628 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-config\") pod \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\" (UID: \"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92\") " Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.459913 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-kube-api-access-gsz6f" (OuterVolumeSpecName: "kube-api-access-gsz6f") pod "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" (UID: "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92"). InnerVolumeSpecName "kube-api-access-gsz6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.503067 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" (UID: "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.520437 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" (UID: "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.522434 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" (UID: "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.527425 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-config" (OuterVolumeSpecName: "config") pod "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" (UID: "2ceb7f5c-d310-4e14-87d0-bad5a01e5e92"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.555478 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.555709 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.555729 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsz6f\" (UniqueName: \"kubernetes.io/projected/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-kube-api-access-gsz6f\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.555740 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.555748 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.590695 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-bkbt8"] Nov 11 13:56:33 crc kubenswrapper[4842]: W1111 13:56:33.606344 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd57c4421_985b_449d_a007_c05c9ab3e434.slice/crio-d122a1ac87fcc398a310b8d5ff9403d57c00fb7d4738a599527be9203d1ad509 WatchSource:0}: Error finding container d122a1ac87fcc398a310b8d5ff9403d57c00fb7d4738a599527be9203d1ad509: Status 404 returned error can't find the container with id d122a1ac87fcc398a310b8d5ff9403d57c00fb7d4738a599527be9203d1ad509 Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.610091 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-53b5-account-create-pptc8"] Nov 11 13:56:33 crc kubenswrapper[4842]: W1111 13:56:33.616268 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod414c83eb_da9d_454d_bf91_d577ca5b195b.slice/crio-1d434d5411673e35e5d1a56457fd29cdb5e2f3aa1cf1d0321a8db6df65837910 WatchSource:0}: Error finding container 1d434d5411673e35e5d1a56457fd29cdb5e2f3aa1cf1d0321a8db6df65837910: Status 404 returned error 
can't find the container with id 1d434d5411673e35e5d1a56457fd29cdb5e2f3aa1cf1d0321a8db6df65837910 Nov 11 13:56:33 crc kubenswrapper[4842]: I1111 13:56:33.618590 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-k2gmm"] Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.000845 4842 generic.go:334] "Generic (PLEG): container finished" podID="414c83eb-da9d-454d-bf91-d577ca5b195b" containerID="a0d1582671902c4bdb1bad9135e8a03c9ae4727bb4dbcbea868cc7db9539de05" exitCode=0 Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.000940 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-53b5-account-create-pptc8" event={"ID":"414c83eb-da9d-454d-bf91-d577ca5b195b","Type":"ContainerDied","Data":"a0d1582671902c4bdb1bad9135e8a03c9ae4727bb4dbcbea868cc7db9539de05"} Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.000981 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-53b5-account-create-pptc8" event={"ID":"414c83eb-da9d-454d-bf91-d577ca5b195b","Type":"ContainerStarted","Data":"1d434d5411673e35e5d1a56457fd29cdb5e2f3aa1cf1d0321a8db6df65837910"} Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.003687 4842 generic.go:334] "Generic (PLEG): container finished" podID="67ae8176-e61d-426c-bb60-286f00de14e3" containerID="a8787af09f69c3034752d577148fe07f5ee677c9a5035ea073467905f5ad6987" exitCode=0 Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.003773 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k2gmm" event={"ID":"67ae8176-e61d-426c-bb60-286f00de14e3","Type":"ContainerDied","Data":"a8787af09f69c3034752d577148fe07f5ee677c9a5035ea073467905f5ad6987"} Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.003805 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k2gmm" event={"ID":"67ae8176-e61d-426c-bb60-286f00de14e3","Type":"ContainerStarted","Data":"d12f47e084077419803acb499ef835db6e976ac2948b39d6ebed55f972786436"} Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.007611 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-z5kzq" event={"ID":"8b12adcf-9678-4493-8035-061dcdf98b6e","Type":"ContainerStarted","Data":"c2443e5327db618c7e23712c1cfa66bee8b9902b9d074c86092b71c590b08e1f"} Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.012459 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" event={"ID":"2ceb7f5c-d310-4e14-87d0-bad5a01e5e92","Type":"ContainerDied","Data":"2f2091f9309eb346bc074ef9d6ffb0a1b90225f43978165d33bae80ccdcdfac4"} Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.012543 4842 scope.go:117] "RemoveContainer" containerID="e88a391af281c51117a74cfcd095b45d620a3c336944efa218b23ae5025728ba" Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.012484 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.015910 4842 generic.go:334] "Generic (PLEG): container finished" podID="d57c4421-985b-449d-a007-c05c9ab3e434" containerID="6a63dafd393be5d16a7041b67b38e5c7b0e5d5d211e92e7731d2ef8683ad8fff" exitCode=0 Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.016014 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bkbt8" event={"ID":"d57c4421-985b-449d-a007-c05c9ab3e434","Type":"ContainerDied","Data":"6a63dafd393be5d16a7041b67b38e5c7b0e5d5d211e92e7731d2ef8683ad8fff"} Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.016065 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bkbt8" event={"ID":"d57c4421-985b-449d-a007-c05c9ab3e434","Type":"ContainerStarted","Data":"d122a1ac87fcc398a310b8d5ff9403d57c00fb7d4738a599527be9203d1ad509"} Nov 11 13:56:34 crc kubenswrapper[4842]: E1111 13:56:34.017150 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.055221 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-z5kzq" podStartSLOduration=2.099778287 podStartE2EDuration="13.055204924s" podCreationTimestamp="2025-11-11 13:56:21 +0000 UTC" firstStartedPulling="2025-11-11 13:56:22.147746228 +0000 UTC m=+1592.808035837" lastFinishedPulling="2025-11-11 13:56:33.103172865 +0000 UTC m=+1603.763462474" observedRunningTime="2025-11-11 13:56:34.053238631 +0000 UTC m=+1604.713528260" watchObservedRunningTime="2025-11-11 13:56:34.055204924 +0000 UTC m=+1604.715494543" Nov 11 13:56:34 crc kubenswrapper[4842]: I1111 13:56:34.063796 4842 scope.go:117] "RemoveContainer" containerID="d1e55584233ec6d4c1a3b6c925b6469b5795e4cf4ae58297e1dcdf8e68dcfd5e" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.463538 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-53b5-account-create-pptc8" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.577597 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k2gmm" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.584319 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bkbt8" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.596013 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q56hk\" (UniqueName: \"kubernetes.io/projected/414c83eb-da9d-454d-bf91-d577ca5b195b-kube-api-access-q56hk\") pod \"414c83eb-da9d-454d-bf91-d577ca5b195b\" (UID: \"414c83eb-da9d-454d-bf91-d577ca5b195b\") " Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.601371 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/414c83eb-da9d-454d-bf91-d577ca5b195b-kube-api-access-q56hk" (OuterVolumeSpecName: "kube-api-access-q56hk") pod "414c83eb-da9d-454d-bf91-d577ca5b195b" (UID: "414c83eb-da9d-454d-bf91-d577ca5b195b"). InnerVolumeSpecName "kube-api-access-q56hk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.698167 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kszrj\" (UniqueName: \"kubernetes.io/projected/67ae8176-e61d-426c-bb60-286f00de14e3-kube-api-access-kszrj\") pod \"67ae8176-e61d-426c-bb60-286f00de14e3\" (UID: \"67ae8176-e61d-426c-bb60-286f00de14e3\") " Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.698344 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhvct\" (UniqueName: \"kubernetes.io/projected/d57c4421-985b-449d-a007-c05c9ab3e434-kube-api-access-xhvct\") pod \"d57c4421-985b-449d-a007-c05c9ab3e434\" (UID: \"d57c4421-985b-449d-a007-c05c9ab3e434\") " Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.698687 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q56hk\" (UniqueName: \"kubernetes.io/projected/414c83eb-da9d-454d-bf91-d577ca5b195b-kube-api-access-q56hk\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.701137 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67ae8176-e61d-426c-bb60-286f00de14e3-kube-api-access-kszrj" (OuterVolumeSpecName: "kube-api-access-kszrj") pod "67ae8176-e61d-426c-bb60-286f00de14e3" (UID: "67ae8176-e61d-426c-bb60-286f00de14e3"). InnerVolumeSpecName "kube-api-access-kszrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.701534 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d57c4421-985b-449d-a007-c05c9ab3e434-kube-api-access-xhvct" (OuterVolumeSpecName: "kube-api-access-xhvct") pod "d57c4421-985b-449d-a007-c05c9ab3e434" (UID: "d57c4421-985b-449d-a007-c05c9ab3e434"). InnerVolumeSpecName "kube-api-access-xhvct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.800465 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kszrj\" (UniqueName: \"kubernetes.io/projected/67ae8176-e61d-426c-bb60-286f00de14e3-kube-api-access-kszrj\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:35 crc kubenswrapper[4842]: I1111 13:56:35.800774 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhvct\" (UniqueName: \"kubernetes.io/projected/d57c4421-985b-449d-a007-c05c9ab3e434-kube-api-access-xhvct\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.033927 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bkbt8" event={"ID":"d57c4421-985b-449d-a007-c05c9ab3e434","Type":"ContainerDied","Data":"d122a1ac87fcc398a310b8d5ff9403d57c00fb7d4738a599527be9203d1ad509"} Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.033971 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d122a1ac87fcc398a310b8d5ff9403d57c00fb7d4738a599527be9203d1ad509" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.034022 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-bkbt8" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.035787 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-53b5-account-create-pptc8" event={"ID":"414c83eb-da9d-454d-bf91-d577ca5b195b","Type":"ContainerDied","Data":"1d434d5411673e35e5d1a56457fd29cdb5e2f3aa1cf1d0321a8db6df65837910"} Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.035814 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d434d5411673e35e5d1a56457fd29cdb5e2f3aa1cf1d0321a8db6df65837910" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.035860 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-53b5-account-create-pptc8" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.048339 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k2gmm" event={"ID":"67ae8176-e61d-426c-bb60-286f00de14e3","Type":"ContainerDied","Data":"d12f47e084077419803acb499ef835db6e976ac2948b39d6ebed55f972786436"} Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.048393 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d12f47e084077419803acb499ef835db6e976ac2948b39d6ebed55f972786436" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.048426 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k2gmm" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.072427 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.075074 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.615746 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.616012 4842 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.616044 4842 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.616125 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift podName:2a1e5076-485c-4759-ba37-33e161741f74 nodeName:}" failed. No retries permitted until 2025-11-11 13:56:52.616091447 +0000 UTC m=+1623.276381066 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift") pod "swift-storage-0" (UID: "2a1e5076-485c-4759-ba37-33e161741f74") : configmap "swift-ring-files" not found Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905249 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-eda9-account-create-jdqqc"] Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.905578 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerName="init" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905591 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerName="init" Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.905605 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67ae8176-e61d-426c-bb60-286f00de14e3" containerName="mariadb-database-create" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905613 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="67ae8176-e61d-426c-bb60-286f00de14e3" containerName="mariadb-database-create" Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.905628 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="414c83eb-da9d-454d-bf91-d577ca5b195b" containerName="mariadb-account-create" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905635 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="414c83eb-da9d-454d-bf91-d577ca5b195b" containerName="mariadb-account-create" Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.905648 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d57c4421-985b-449d-a007-c05c9ab3e434" containerName="mariadb-database-create" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905653 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d57c4421-985b-449d-a007-c05c9ab3e434" containerName="mariadb-database-create" Nov 11 13:56:36 crc kubenswrapper[4842]: E1111 13:56:36.905666 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerName="dnsmasq-dns" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905672 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerName="dnsmasq-dns" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905849 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d57c4421-985b-449d-a007-c05c9ab3e434" containerName="mariadb-database-create" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905862 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="414c83eb-da9d-454d-bf91-d577ca5b195b" containerName="mariadb-account-create" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905873 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="67ae8176-e61d-426c-bb60-286f00de14e3" containerName="mariadb-database-create" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.905880 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" containerName="dnsmasq-dns" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.906504 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-eda9-account-create-jdqqc" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.908171 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 11 13:56:36 crc kubenswrapper[4842]: I1111 13:56:36.926431 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-eda9-account-create-jdqqc"] Nov 11 13:56:37 crc kubenswrapper[4842]: I1111 13:56:37.024084 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g46k2\" (UniqueName: \"kubernetes.io/projected/4de90b2f-a1b4-4fab-ab4a-433470a522fb-kube-api-access-g46k2\") pod \"keystone-eda9-account-create-jdqqc\" (UID: \"4de90b2f-a1b4-4fab-ab4a-433470a522fb\") " pod="openstack/keystone-eda9-account-create-jdqqc" Nov 11 13:56:37 crc kubenswrapper[4842]: I1111 13:56:37.125899 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g46k2\" (UniqueName: \"kubernetes.io/projected/4de90b2f-a1b4-4fab-ab4a-433470a522fb-kube-api-access-g46k2\") pod \"keystone-eda9-account-create-jdqqc\" (UID: \"4de90b2f-a1b4-4fab-ab4a-433470a522fb\") " pod="openstack/keystone-eda9-account-create-jdqqc" Nov 11 13:56:37 crc kubenswrapper[4842]: I1111 13:56:37.159697 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g46k2\" (UniqueName: \"kubernetes.io/projected/4de90b2f-a1b4-4fab-ab4a-433470a522fb-kube-api-access-g46k2\") pod \"keystone-eda9-account-create-jdqqc\" (UID: \"4de90b2f-a1b4-4fab-ab4a-433470a522fb\") " pod="openstack/keystone-eda9-account-create-jdqqc" Nov 11 13:56:37 crc kubenswrapper[4842]: I1111 13:56:37.244513 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-eda9-account-create-jdqqc" Nov 11 13:56:37 crc kubenswrapper[4842]: I1111 13:56:37.662419 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-eda9-account-create-jdqqc"] Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.065127 4842 generic.go:334] "Generic (PLEG): container finished" podID="4de90b2f-a1b4-4fab-ab4a-433470a522fb" containerID="c5312575cee0d77bc14f1a973e4b6baa0a78171022aa2004ba3f3b1ed8ce2a49" exitCode=0 Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.071330 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-eda9-account-create-jdqqc" event={"ID":"4de90b2f-a1b4-4fab-ab4a-433470a522fb","Type":"ContainerDied","Data":"c5312575cee0d77bc14f1a973e4b6baa0a78171022aa2004ba3f3b1ed8ce2a49"} Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.071368 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-eda9-account-create-jdqqc" event={"ID":"4de90b2f-a1b4-4fab-ab4a-433470a522fb","Type":"ContainerStarted","Data":"8ea4a7f6a6755d73d28cc13f116e8d60d3403880cb43aeb2c8ab8cdc56dfcca9"} Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.231384 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-f7sn5" podUID="6ae937ce-ab8b-471f-b809-821ca6f23ecd" containerName="ovn-controller" probeResult="failure" output=< Nov 11 13:56:38 crc kubenswrapper[4842]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 11 13:56:38 crc kubenswrapper[4842]: > Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.288676 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.300116 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-cc6nz" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.504718 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-f7sn5-config-x9bdd"] Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.505950 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.511593 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.513404 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f7sn5-config-x9bdd"] Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.551532 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-log-ovn\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.551592 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-scripts\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.551636 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run-ovn\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.551661 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd2pq\" (UniqueName: \"kubernetes.io/projected/8feb5452-21f2-48ea-a82d-41c561e878ea-kube-api-access-qd2pq\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.551801 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-additional-scripts\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.551831 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653079 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run-ovn\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653164 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd2pq\" (UniqueName: 
\"kubernetes.io/projected/8feb5452-21f2-48ea-a82d-41c561e878ea-kube-api-access-qd2pq\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653258 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-additional-scripts\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653283 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653316 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-log-ovn\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653341 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-scripts\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653885 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.653979 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-log-ovn\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.654612 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-additional-scripts\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.654755 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run-ovn\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.655324 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-scripts\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.674525 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd2pq\" (UniqueName: \"kubernetes.io/projected/8feb5452-21f2-48ea-a82d-41c561e878ea-kube-api-access-qd2pq\") pod \"ovn-controller-f7sn5-config-x9bdd\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:38 crc kubenswrapper[4842]: I1111 13:56:38.826855 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:39 crc kubenswrapper[4842]: I1111 13:56:39.285349 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f7sn5-config-x9bdd"] Nov 11 13:56:39 crc kubenswrapper[4842]: W1111 13:56:39.289021 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8feb5452_21f2_48ea_a82d_41c561e878ea.slice/crio-2af114dbe8636909b438df0cca61c7b5507148d5e5e8f77783df6c5cdb7d50f6 WatchSource:0}: Error finding container 2af114dbe8636909b438df0cca61c7b5507148d5e5e8f77783df6c5cdb7d50f6: Status 404 returned error can't find the container with id 2af114dbe8636909b438df0cca61c7b5507148d5e5e8f77783df6c5cdb7d50f6 Nov 11 13:56:39 crc kubenswrapper[4842]: E1111 13:56:39.925766 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8feb5452_21f2_48ea_a82d_41c561e878ea.slice/crio-conmon-d6967b776d91d6c0f11a25b3c2eac73ec5c2310ad6fce5ea72129cda25a38a7a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8feb5452_21f2_48ea_a82d_41c561e878ea.slice/crio-d6967b776d91d6c0f11a25b3c2eac73ec5c2310ad6fce5ea72129cda25a38a7a.scope\": RecentStats: unable to find data in memory cache]" Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.033564 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-eda9-account-create-jdqqc" Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.076420 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g46k2\" (UniqueName: \"kubernetes.io/projected/4de90b2f-a1b4-4fab-ab4a-433470a522fb-kube-api-access-g46k2\") pod \"4de90b2f-a1b4-4fab-ab4a-433470a522fb\" (UID: \"4de90b2f-a1b4-4fab-ab4a-433470a522fb\") " Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.092539 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4de90b2f-a1b4-4fab-ab4a-433470a522fb-kube-api-access-g46k2" (OuterVolumeSpecName: "kube-api-access-g46k2") pod "4de90b2f-a1b4-4fab-ab4a-433470a522fb" (UID: "4de90b2f-a1b4-4fab-ab4a-433470a522fb"). InnerVolumeSpecName "kube-api-access-g46k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.095241 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-eda9-account-create-jdqqc" Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.095987 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-eda9-account-create-jdqqc" event={"ID":"4de90b2f-a1b4-4fab-ab4a-433470a522fb","Type":"ContainerDied","Data":"8ea4a7f6a6755d73d28cc13f116e8d60d3403880cb43aeb2c8ab8cdc56dfcca9"} Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.096034 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ea4a7f6a6755d73d28cc13f116e8d60d3403880cb43aeb2c8ab8cdc56dfcca9" Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.096937 4842 generic.go:334] "Generic (PLEG): container finished" podID="121e4ffa-c7c1-40ef-a668-500b2cc8fba6" containerID="80f8963a1103cec4985aa1bf4d5152b260a24f6f08e57cf58bea4fbb7bc5a858" exitCode=0 Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.097033 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"121e4ffa-c7c1-40ef-a668-500b2cc8fba6","Type":"ContainerDied","Data":"80f8963a1103cec4985aa1bf4d5152b260a24f6f08e57cf58bea4fbb7bc5a858"} Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.099461 4842 generic.go:334] "Generic (PLEG): container finished" podID="8feb5452-21f2-48ea-a82d-41c561e878ea" containerID="d6967b776d91d6c0f11a25b3c2eac73ec5c2310ad6fce5ea72129cda25a38a7a" exitCode=0 Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.099497 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f7sn5-config-x9bdd" event={"ID":"8feb5452-21f2-48ea-a82d-41c561e878ea","Type":"ContainerDied","Data":"d6967b776d91d6c0f11a25b3c2eac73ec5c2310ad6fce5ea72129cda25a38a7a"} Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.099518 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f7sn5-config-x9bdd" event={"ID":"8feb5452-21f2-48ea-a82d-41c561e878ea","Type":"ContainerStarted","Data":"2af114dbe8636909b438df0cca61c7b5507148d5e5e8f77783df6c5cdb7d50f6"} Nov 11 13:56:40 crc kubenswrapper[4842]: I1111 13:56:40.178651 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g46k2\" (UniqueName: \"kubernetes.io/projected/4de90b2f-a1b4-4fab-ab4a-433470a522fb-kube-api-access-g46k2\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.071636 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:41 crc kubenswrapper[4842]: E1111 13:56:41.073850 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.074016 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.109498 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"121e4ffa-c7c1-40ef-a668-500b2cc8fba6","Type":"ContainerStarted","Data":"505e95b5f460ceb32f5b0d401e9f09e80c8e5380e04e8d5cd80e0cd8b1cea110"} Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 
13:56:41.110214 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.111722 4842 generic.go:334] "Generic (PLEG): container finished" podID="8b12adcf-9678-4493-8035-061dcdf98b6e" containerID="c2443e5327db618c7e23712c1cfa66bee8b9902b9d074c86092b71c590b08e1f" exitCode=0 Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.111888 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-z5kzq" event={"ID":"8b12adcf-9678-4493-8035-061dcdf98b6e","Type":"ContainerDied","Data":"c2443e5327db618c7e23712c1cfa66bee8b9902b9d074c86092b71c590b08e1f"} Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.114074 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:41 crc kubenswrapper[4842]: E1111 13:56:41.114303 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.144653 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-notifications-server-0" podStartSLOduration=37.348731476 podStartE2EDuration="1m9.144605208s" podCreationTimestamp="2025-11-11 13:55:32 +0000 UTC" firstStartedPulling="2025-11-11 13:55:34.341142431 +0000 UTC m=+1545.001432050" lastFinishedPulling="2025-11-11 13:56:06.137016163 +0000 UTC m=+1576.797305782" observedRunningTime="2025-11-11 13:56:41.131926525 +0000 UTC m=+1611.792216154" watchObservedRunningTime="2025-11-11 13:56:41.144605208 +0000 UTC m=+1611.804894827" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.481616 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.505502 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-scripts\") pod \"8feb5452-21f2-48ea-a82d-41c561e878ea\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.505683 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd2pq\" (UniqueName: \"kubernetes.io/projected/8feb5452-21f2-48ea-a82d-41c561e878ea-kube-api-access-qd2pq\") pod \"8feb5452-21f2-48ea-a82d-41c561e878ea\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.505804 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-additional-scripts\") pod \"8feb5452-21f2-48ea-a82d-41c561e878ea\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.505877 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run\") pod \"8feb5452-21f2-48ea-a82d-41c561e878ea\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.505951 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-log-ovn\") pod \"8feb5452-21f2-48ea-a82d-41c561e878ea\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.505927 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run" (OuterVolumeSpecName: "var-run") pod "8feb5452-21f2-48ea-a82d-41c561e878ea" (UID: "8feb5452-21f2-48ea-a82d-41c561e878ea"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.506011 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run-ovn\") pod \"8feb5452-21f2-48ea-a82d-41c561e878ea\" (UID: \"8feb5452-21f2-48ea-a82d-41c561e878ea\") " Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.505973 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "8feb5452-21f2-48ea-a82d-41c561e878ea" (UID: "8feb5452-21f2-48ea-a82d-41c561e878ea"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.506280 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "8feb5452-21f2-48ea-a82d-41c561e878ea" (UID: "8feb5452-21f2-48ea-a82d-41c561e878ea"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.506588 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "8feb5452-21f2-48ea-a82d-41c561e878ea" (UID: "8feb5452-21f2-48ea-a82d-41c561e878ea"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.506768 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-scripts" (OuterVolumeSpecName: "scripts") pod "8feb5452-21f2-48ea-a82d-41c561e878ea" (UID: "8feb5452-21f2-48ea-a82d-41c561e878ea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.507150 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.507174 4842 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8feb5452-21f2-48ea-a82d-41c561e878ea-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.507186 4842 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.507199 4842 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.507209 4842 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8feb5452-21f2-48ea-a82d-41c561e878ea-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.513482 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8feb5452-21f2-48ea-a82d-41c561e878ea-kube-api-access-qd2pq" (OuterVolumeSpecName: "kube-api-access-qd2pq") pod "8feb5452-21f2-48ea-a82d-41c561e878ea" (UID: "8feb5452-21f2-48ea-a82d-41c561e878ea"). InnerVolumeSpecName "kube-api-access-qd2pq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:41 crc kubenswrapper[4842]: I1111 13:56:41.608725 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd2pq\" (UniqueName: \"kubernetes.io/projected/8feb5452-21f2-48ea-a82d-41c561e878ea-kube-api-access-qd2pq\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.120607 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f7sn5-config-x9bdd" event={"ID":"8feb5452-21f2-48ea-a82d-41c561e878ea","Type":"ContainerDied","Data":"2af114dbe8636909b438df0cca61c7b5507148d5e5e8f77783df6c5cdb7d50f6"} Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.120659 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2af114dbe8636909b438df0cca61c7b5507148d5e5e8f77783df6c5cdb7d50f6" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.120723 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f7sn5-config-x9bdd" Nov 11 13:56:42 crc kubenswrapper[4842]: E1111 13:56:42.123040 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:f3806c97420ec8ba91895ce7627df7612cccb927c05d7854377f45cdd6c924a8\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.483117 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.599725 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-f7sn5-config-x9bdd"] Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.606578 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-f7sn5-config-x9bdd"] Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.623092 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-swiftconf\") pod \"8b12adcf-9678-4493-8035-061dcdf98b6e\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.623164 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-scripts\") pod \"8b12adcf-9678-4493-8035-061dcdf98b6e\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.623225 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-combined-ca-bundle\") pod \"8b12adcf-9678-4493-8035-061dcdf98b6e\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.623297 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrdd2\" (UniqueName: \"kubernetes.io/projected/8b12adcf-9678-4493-8035-061dcdf98b6e-kube-api-access-lrdd2\") pod \"8b12adcf-9678-4493-8035-061dcdf98b6e\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.623388 4842 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-ring-data-devices\") pod \"8b12adcf-9678-4493-8035-061dcdf98b6e\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.623425 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8b12adcf-9678-4493-8035-061dcdf98b6e-etc-swift\") pod \"8b12adcf-9678-4493-8035-061dcdf98b6e\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.623457 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-dispersionconf\") pod \"8b12adcf-9678-4493-8035-061dcdf98b6e\" (UID: \"8b12adcf-9678-4493-8035-061dcdf98b6e\") " Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.626182 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "8b12adcf-9678-4493-8035-061dcdf98b6e" (UID: "8b12adcf-9678-4493-8035-061dcdf98b6e"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.626518 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b12adcf-9678-4493-8035-061dcdf98b6e-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "8b12adcf-9678-4493-8035-061dcdf98b6e" (UID: "8b12adcf-9678-4493-8035-061dcdf98b6e"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.628981 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b12adcf-9678-4493-8035-061dcdf98b6e-kube-api-access-lrdd2" (OuterVolumeSpecName: "kube-api-access-lrdd2") pod "8b12adcf-9678-4493-8035-061dcdf98b6e" (UID: "8b12adcf-9678-4493-8035-061dcdf98b6e"). InnerVolumeSpecName "kube-api-access-lrdd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.634264 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "8b12adcf-9678-4493-8035-061dcdf98b6e" (UID: "8b12adcf-9678-4493-8035-061dcdf98b6e"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.647616 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "8b12adcf-9678-4493-8035-061dcdf98b6e" (UID: "8b12adcf-9678-4493-8035-061dcdf98b6e"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.653377 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-scripts" (OuterVolumeSpecName: "scripts") pod "8b12adcf-9678-4493-8035-061dcdf98b6e" (UID: "8b12adcf-9678-4493-8035-061dcdf98b6e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.653541 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b12adcf-9678-4493-8035-061dcdf98b6e" (UID: "8b12adcf-9678-4493-8035-061dcdf98b6e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.725615 4842 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8b12adcf-9678-4493-8035-061dcdf98b6e-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.725656 4842 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.725672 4842 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.725685 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.725696 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b12adcf-9678-4493-8035-061dcdf98b6e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.725707 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrdd2\" (UniqueName: \"kubernetes.io/projected/8b12adcf-9678-4493-8035-061dcdf98b6e-kube-api-access-lrdd2\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:42 crc kubenswrapper[4842]: I1111 13:56:42.725719 4842 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8b12adcf-9678-4493-8035-061dcdf98b6e-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:43 crc kubenswrapper[4842]: I1111 13:56:43.135979 4842 generic.go:334] "Generic (PLEG): container finished" podID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerID="b7c2238f1340635d5115dd9fe56f38cf43d59742baa12c7b6cb17a3e757a55bb" exitCode=0 Nov 11 13:56:43 crc kubenswrapper[4842]: I1111 13:56:43.136017 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"dd8c2bc7-52e3-408c-8a72-3e5978b30a42","Type":"ContainerDied","Data":"b7c2238f1340635d5115dd9fe56f38cf43d59742baa12c7b6cb17a3e757a55bb"} Nov 11 13:56:43 crc kubenswrapper[4842]: I1111 13:56:43.138304 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-z5kzq" event={"ID":"8b12adcf-9678-4493-8035-061dcdf98b6e","Type":"ContainerDied","Data":"2b87ac9a199c438aa9dd50f069f945631301dbfb67a19f5d5538e82f8f173f95"} Nov 11 13:56:43 crc kubenswrapper[4842]: I1111 13:56:43.138335 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b87ac9a199c438aa9dd50f069f945631301dbfb67a19f5d5538e82f8f173f95" Nov 11 13:56:43 crc kubenswrapper[4842]: I1111 13:56:43.138426 4842 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-z5kzq" Nov 11 13:56:43 crc kubenswrapper[4842]: I1111 13:56:43.265644 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-f7sn5" Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.070216 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8feb5452-21f2-48ea-a82d-41c561e878ea" path="/var/lib/kubelet/pods/8feb5452-21f2-48ea-a82d-41c561e878ea/volumes" Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.148187 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"dd8c2bc7-52e3-408c-8a72-3e5978b30a42","Type":"ContainerStarted","Data":"9a10a06dd7fcf421396f8acc5ff985e3c3263a70b5ae97a1dfc89eec8abf72a6"} Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.148903 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.150337 4842 generic.go:334] "Generic (PLEG): container finished" podID="13087f6b-10cb-421a-b695-84006a81506f" containerID="8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf" exitCode=0 Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.150362 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13087f6b-10cb-421a-b695-84006a81506f","Type":"ContainerDied","Data":"8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf"} Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.182987 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371963.67181 podStartE2EDuration="1m13.182965113s" podCreationTimestamp="2025-11-11 13:55:31 +0000 UTC" firstStartedPulling="2025-11-11 13:55:33.85458157 +0000 UTC m=+1544.514871189" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:56:44.173515443 +0000 UTC m=+1614.833805062" watchObservedRunningTime="2025-11-11 13:56:44.182965113 +0000 UTC m=+1614.843254732" Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.961554 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:56:44 crc kubenswrapper[4842]: I1111 13:56:44.961605 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:56:45 crc kubenswrapper[4842]: I1111 13:56:45.159599 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13087f6b-10cb-421a-b695-84006a81506f","Type":"ContainerStarted","Data":"836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f"} Nov 11 13:56:45 crc kubenswrapper[4842]: I1111 13:56:45.160362 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:56:45 crc kubenswrapper[4842]: I1111 13:56:45.193793 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" 
podStartSLOduration=-9223371963.661001 podStartE2EDuration="1m13.193774858s" podCreationTimestamp="2025-11-11 13:55:32 +0000 UTC" firstStartedPulling="2025-11-11 13:55:34.133640791 +0000 UTC m=+1544.793930410" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:56:45.188895514 +0000 UTC m=+1615.849185143" watchObservedRunningTime="2025-11-11 13:56:45.193774858 +0000 UTC m=+1615.854064487" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.244262 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-da18-account-create-npt7z"] Nov 11 13:56:47 crc kubenswrapper[4842]: E1111 13:56:47.245146 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4de90b2f-a1b4-4fab-ab4a-433470a522fb" containerName="mariadb-account-create" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.245163 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="4de90b2f-a1b4-4fab-ab4a-433470a522fb" containerName="mariadb-account-create" Nov 11 13:56:47 crc kubenswrapper[4842]: E1111 13:56:47.245200 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b12adcf-9678-4493-8035-061dcdf98b6e" containerName="swift-ring-rebalance" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.245206 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b12adcf-9678-4493-8035-061dcdf98b6e" containerName="swift-ring-rebalance" Nov 11 13:56:47 crc kubenswrapper[4842]: E1111 13:56:47.245218 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8feb5452-21f2-48ea-a82d-41c561e878ea" containerName="ovn-config" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.245224 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8feb5452-21f2-48ea-a82d-41c561e878ea" containerName="ovn-config" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.245389 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8feb5452-21f2-48ea-a82d-41c561e878ea" containerName="ovn-config" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.245406 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b12adcf-9678-4493-8035-061dcdf98b6e" containerName="swift-ring-rebalance" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.245418 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="4de90b2f-a1b4-4fab-ab4a-433470a522fb" containerName="mariadb-account-create" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.246003 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-da18-account-create-npt7z" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.248214 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.264262 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-da18-account-create-npt7z"] Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.402720 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89bng\" (UniqueName: \"kubernetes.io/projected/ead23b7d-4069-406c-ad8f-a23027ccedd0-kube-api-access-89bng\") pod \"placement-da18-account-create-npt7z\" (UID: \"ead23b7d-4069-406c-ad8f-a23027ccedd0\") " pod="openstack/placement-da18-account-create-npt7z" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.504235 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89bng\" (UniqueName: \"kubernetes.io/projected/ead23b7d-4069-406c-ad8f-a23027ccedd0-kube-api-access-89bng\") pod \"placement-da18-account-create-npt7z\" (UID: \"ead23b7d-4069-406c-ad8f-a23027ccedd0\") " pod="openstack/placement-da18-account-create-npt7z" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.523811 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89bng\" (UniqueName: \"kubernetes.io/projected/ead23b7d-4069-406c-ad8f-a23027ccedd0-kube-api-access-89bng\") pod \"placement-da18-account-create-npt7z\" (UID: \"ead23b7d-4069-406c-ad8f-a23027ccedd0\") " pod="openstack/placement-da18-account-create-npt7z" Nov 11 13:56:47 crc kubenswrapper[4842]: I1111 13:56:47.576229 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-da18-account-create-npt7z" Nov 11 13:56:48 crc kubenswrapper[4842]: I1111 13:56:48.058270 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-da18-account-create-npt7z"] Nov 11 13:56:48 crc kubenswrapper[4842]: I1111 13:56:48.188027 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-da18-account-create-npt7z" event={"ID":"ead23b7d-4069-406c-ad8f-a23027ccedd0","Type":"ContainerStarted","Data":"6975b63a4435006229e978bf22de0fa457e06adb5a95eab8be44ee0f2a725fda"} Nov 11 13:56:49 crc kubenswrapper[4842]: I1111 13:56:49.196426 4842 generic.go:334] "Generic (PLEG): container finished" podID="ead23b7d-4069-406c-ad8f-a23027ccedd0" containerID="66077263a1809dddd2813736e93559206d5f18b79f674e07e4d6c79121a2b3e9" exitCode=0 Nov 11 13:56:49 crc kubenswrapper[4842]: I1111 13:56:49.196473 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-da18-account-create-npt7z" event={"ID":"ead23b7d-4069-406c-ad8f-a23027ccedd0","Type":"ContainerDied","Data":"66077263a1809dddd2813736e93559206d5f18b79f674e07e4d6c79121a2b3e9"} Nov 11 13:56:50 crc kubenswrapper[4842]: I1111 13:56:50.523125 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-da18-account-create-npt7z" Nov 11 13:56:50 crc kubenswrapper[4842]: I1111 13:56:50.659567 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89bng\" (UniqueName: \"kubernetes.io/projected/ead23b7d-4069-406c-ad8f-a23027ccedd0-kube-api-access-89bng\") pod \"ead23b7d-4069-406c-ad8f-a23027ccedd0\" (UID: \"ead23b7d-4069-406c-ad8f-a23027ccedd0\") " Nov 11 13:56:50 crc kubenswrapper[4842]: I1111 13:56:50.667989 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ead23b7d-4069-406c-ad8f-a23027ccedd0-kube-api-access-89bng" (OuterVolumeSpecName: "kube-api-access-89bng") pod "ead23b7d-4069-406c-ad8f-a23027ccedd0" (UID: "ead23b7d-4069-406c-ad8f-a23027ccedd0"). InnerVolumeSpecName "kube-api-access-89bng". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:50 crc kubenswrapper[4842]: I1111 13:56:50.761644 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89bng\" (UniqueName: \"kubernetes.io/projected/ead23b7d-4069-406c-ad8f-a23027ccedd0-kube-api-access-89bng\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:51 crc kubenswrapper[4842]: I1111 13:56:51.212606 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-da18-account-create-npt7z" event={"ID":"ead23b7d-4069-406c-ad8f-a23027ccedd0","Type":"ContainerDied","Data":"6975b63a4435006229e978bf22de0fa457e06adb5a95eab8be44ee0f2a725fda"} Nov 11 13:56:51 crc kubenswrapper[4842]: I1111 13:56:51.212658 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6975b63a4435006229e978bf22de0fa457e06adb5a95eab8be44ee0f2a725fda" Nov 11 13:56:51 crc kubenswrapper[4842]: I1111 13:56:51.212715 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-da18-account-create-npt7z" Nov 11 13:56:52 crc kubenswrapper[4842]: I1111 13:56:52.690883 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:52 crc kubenswrapper[4842]: I1111 13:56:52.708060 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2a1e5076-485c-4759-ba37-33e161741f74-etc-swift\") pod \"swift-storage-0\" (UID: \"2a1e5076-485c-4759-ba37-33e161741f74\") " pod="openstack/swift-storage-0" Nov 11 13:56:52 crc kubenswrapper[4842]: I1111 13:56:52.837386 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 11 13:56:53 crc kubenswrapper[4842]: I1111 13:56:53.184015 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.109:5671: connect: connection refused" Nov 11 13:56:53 crc kubenswrapper[4842]: I1111 13:56:53.394588 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 11 13:56:53 crc kubenswrapper[4842]: I1111 13:56:53.402146 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 13:56:53 crc kubenswrapper[4842]: I1111 13:56:53.843002 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-notifications-server-0" podUID="121e4ffa-c7c1-40ef-a668-500b2cc8fba6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.111:5671: connect: connection refused" Nov 11 13:56:54 crc kubenswrapper[4842]: I1111 13:56:54.237913 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"3f39aa2504f7af0590dd9df5cc0864a901e92de63a88f109927f502face0176e"} Nov 11 13:56:54 crc kubenswrapper[4842]: I1111 13:56:54.237968 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"88f05d3b478c7f734875fd50de385a1bda4dd5466d336c96bee25c657e2982e4"} Nov 11 13:56:54 crc kubenswrapper[4842]: I1111 13:56:54.237984 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"18b96d44d4b0862048f87a3512fbe85cb544db6f8745bce988a8038c6dd8e732"} Nov 11 13:56:55 crc kubenswrapper[4842]: I1111 13:56:55.247649 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"4d068707d2a68fb7e3076f9a524bd94dacdabd9a40deea80991c99220f2fee55"} Nov 11 13:56:55 crc kubenswrapper[4842]: I1111 13:56:55.247975 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"7c82f5eac99ad73453ed165c6b9f79d372beda98e2f43c48082195b1b103b852"} Nov 11 13:56:55 crc kubenswrapper[4842]: I1111 13:56:55.247993 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"5f39027b8e15b24a10557311e09f5af5d13d157744259f23895205a36b239eb9"} Nov 11 13:56:56 crc kubenswrapper[4842]: I1111 13:56:56.260797 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"2fb2628a1ab123a52dd63918163f2b93974dc59b0ce97344f7bc3f4fb1c64334"} Nov 11 13:56:56 crc kubenswrapper[4842]: I1111 13:56:56.261145 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"2a2502739a7a89736e3c1824768e301c797bef233e9009fefc493d589ad0c5d9"} Nov 11 13:56:56 crc kubenswrapper[4842]: I1111 13:56:56.261159 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"fea3adc7982be282fa939a61024c3d99d3fde2e6e8863be6e5bce72ef218e636"} Nov 11 13:56:56 crc kubenswrapper[4842]: I1111 13:56:56.264363 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerStarted","Data":"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8"} Nov 11 13:56:56 crc kubenswrapper[4842]: I1111 13:56:56.285336 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=21.853605744 podStartE2EDuration="1m17.285319178s" podCreationTimestamp="2025-11-11 13:55:39 +0000 UTC" firstStartedPulling="2025-11-11 13:55:59.630827758 +0000 UTC m=+1570.291117377" lastFinishedPulling="2025-11-11 13:56:55.062541202 +0000 UTC m=+1625.722830811" observedRunningTime="2025-11-11 13:56:56.283401867 +0000 UTC m=+1626.943691516" watchObservedRunningTime="2025-11-11 13:56:56.285319178 +0000 UTC m=+1626.945608807" Nov 11 13:56:57 crc kubenswrapper[4842]: I1111 13:56:57.309380 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"8097d42c3c1395e6a8276bc3322e134c0c73ac97c361f5e7335f8e16862e1039"} Nov 11 13:56:57 crc kubenswrapper[4842]: I1111 13:56:57.310626 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"39580b3db889d41e607b257e747be0154e4343acefffa48217e506e74c6c83f1"} Nov 11 13:56:57 crc kubenswrapper[4842]: I1111 13:56:57.310723 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"87872a72658015231b858cbf0b61af25dec2c5bf07030a9b16880a1c920f2adc"} Nov 11 13:56:57 crc kubenswrapper[4842]: I1111 13:56:57.310801 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"6ff186203d24adcd9ed3d086e3ff73d3fa06e03a7ff08f68593faafc3ba2c7df"} Nov 11 13:56:57 crc kubenswrapper[4842]: I1111 13:56:57.310877 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"82344cbd41f781fc4b6699874c63dcb484950cb85d958f456fa0459f3c1f501c"} Nov 11 13:56:57 crc kubenswrapper[4842]: I1111 13:56:57.311011 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"418edce34067d49740bf450804fc2bc7f026648e55513070a11a628f248aabb4"} Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.197252 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.197467 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="prometheus" containerID="cri-o://2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d" gracePeriod=600 Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.197626 4842 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="thanos-sidecar" containerID="cri-o://0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8" gracePeriod=600 Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.197728 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="config-reloader" containerID="cri-o://bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f" gracePeriod=600 Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.323830 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2a1e5076-485c-4759-ba37-33e161741f74","Type":"ContainerStarted","Data":"3aebad87d6ad1bb0e01c9e13c776e7fb6788b186e6a82df173d9ba3a1ed23d0e"} Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.331854 4842 generic.go:334] "Generic (PLEG): container finished" podID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerID="0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8" exitCode=0 Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.331900 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerDied","Data":"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8"} Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.357967 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.622678967 podStartE2EDuration="39.357948488s" podCreationTimestamp="2025-11-11 13:56:19 +0000 UTC" firstStartedPulling="2025-11-11 13:56:53.401898723 +0000 UTC m=+1624.062188342" lastFinishedPulling="2025-11-11 13:56:56.137168244 +0000 UTC m=+1626.797457863" observedRunningTime="2025-11-11 13:56:58.352420533 +0000 UTC m=+1629.012710172" watchObservedRunningTime="2025-11-11 13:56:58.357948488 +0000 UTC m=+1629.018238117" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.649032 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8447565c-vnzdx"] Nov 11 13:56:58 crc kubenswrapper[4842]: E1111 13:56:58.649415 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ead23b7d-4069-406c-ad8f-a23027ccedd0" containerName="mariadb-account-create" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.649441 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ead23b7d-4069-406c-ad8f-a23027ccedd0" containerName="mariadb-account-create" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.649635 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="ead23b7d-4069-406c-ad8f-a23027ccedd0" containerName="mariadb-account-create" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.650596 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.652920 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.668157 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8447565c-vnzdx"] Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.703447 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-config\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.703541 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-nb\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.703561 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-swift-storage-0\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.703582 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2cv8\" (UniqueName: \"kubernetes.io/projected/fc8b995c-8577-436c-8341-2cc8e9094a10-kube-api-access-r2cv8\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.703819 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-sb\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.703894 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-svc\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.805344 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-sb\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.805410 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-svc\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " 
pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.805464 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-config\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.805546 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-nb\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.805575 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-swift-storage-0\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.805603 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2cv8\" (UniqueName: \"kubernetes.io/projected/fc8b995c-8577-436c-8341-2cc8e9094a10-kube-api-access-r2cv8\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.806934 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-sb\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.807634 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-svc\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.808303 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-config\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.808934 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-nb\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.809612 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-swift-storage-0\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.845209 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-r2cv8\" (UniqueName: \"kubernetes.io/projected/fc8b995c-8577-436c-8341-2cc8e9094a10-kube-api-access-r2cv8\") pod \"dnsmasq-dns-8447565c-vnzdx\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:58 crc kubenswrapper[4842]: I1111 13:56:58.967057 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.080543 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218496 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px57j\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-kube-api-access-px57j\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218641 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-web-config\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218682 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-tls-assets\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218724 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-config\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218850 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218877 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/218296f7-79b4-47ed-93e7-e0cac5ee935d-prometheus-metric-storage-rulefiles-0\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218904 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-thanos-prometheus-http-client-file\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: \"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.218941 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/218296f7-79b4-47ed-93e7-e0cac5ee935d-config-out\") pod \"218296f7-79b4-47ed-93e7-e0cac5ee935d\" (UID: 
\"218296f7-79b4-47ed-93e7-e0cac5ee935d\") " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.226188 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/218296f7-79b4-47ed-93e7-e0cac5ee935d-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.227519 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.227558 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-kube-api-access-px57j" (OuterVolumeSpecName: "kube-api-access-px57j") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "kube-api-access-px57j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.227642 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-config" (OuterVolumeSpecName: "config") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.230304 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/218296f7-79b4-47ed-93e7-e0cac5ee935d-config-out" (OuterVolumeSpecName: "config-out") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.233405 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.256619 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "pvc-41d5652c-383b-4bea-9fe3-3f23d692956d". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.264387 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-web-config" (OuterVolumeSpecName: "web-config") pod "218296f7-79b4-47ed-93e7-e0cac5ee935d" (UID: "218296f7-79b4-47ed-93e7-e0cac5ee935d"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321169 4842 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") on node \"crc\" " Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321217 4842 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/218296f7-79b4-47ed-93e7-e0cac5ee935d-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321236 4842 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321248 4842 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/218296f7-79b4-47ed-93e7-e0cac5ee935d-config-out\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321262 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px57j\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-kube-api-access-px57j\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321277 4842 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-web-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321291 4842 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/218296f7-79b4-47ed-93e7-e0cac5ee935d-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.321301 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/218296f7-79b4-47ed-93e7-e0cac5ee935d-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.343279 4842 generic.go:334] "Generic (PLEG): container finished" podID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerID="bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f" exitCode=0 Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.343316 4842 generic.go:334] "Generic (PLEG): container finished" podID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerID="2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d" exitCode=0 Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.343496 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.343539 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerDied","Data":"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f"} Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.343567 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerDied","Data":"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d"} Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.343579 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"218296f7-79b4-47ed-93e7-e0cac5ee935d","Type":"ContainerDied","Data":"1bcf0b49382156844d211c77ec8a2384e5ea7c87b32390859da7686c2324afa0"} Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.343597 4842 scope.go:117] "RemoveContainer" containerID="0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.350313 4842 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.350973 4842 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-41d5652c-383b-4bea-9fe3-3f23d692956d" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d") on node "crc" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.388864 4842 scope.go:117] "RemoveContainer" containerID="bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.397708 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.404645 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.408293 4842 scope.go:117] "RemoveContainer" containerID="2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.417462 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.417856 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="thanos-sidecar" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.417881 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="thanos-sidecar" Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.417924 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="prometheus" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.417937 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="prometheus" Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.417949 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="init-config-reloader" Nov 11 13:56:59 crc kubenswrapper[4842]: 
I1111 13:56:59.417958 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="init-config-reloader" Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.417967 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="config-reloader" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.417974 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="config-reloader" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.418167 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="config-reloader" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.418192 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="thanos-sidecar" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.418211 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" containerName="prometheus" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.419813 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.422799 4842 reconciler_common.go:293] "Volume detached for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") on node \"crc\" DevicePath \"\"" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.424311 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-tcncx" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.424758 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.428273 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.430434 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.430567 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.430595 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.437728 4842 scope.go:117] "RemoveContainer" containerID="56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.439601 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.447976 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.459899 4842 scope.go:117] "RemoveContainer" containerID="0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.461019 4842 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/dnsmasq-dns-8447565c-vnzdx"] Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.463346 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8\": container with ID starting with 0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8 not found: ID does not exist" containerID="0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.463396 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8"} err="failed to get container status \"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8\": rpc error: code = NotFound desc = could not find container \"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8\": container with ID starting with 0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8 not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.463430 4842 scope.go:117] "RemoveContainer" containerID="bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f" Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.463832 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f\": container with ID starting with bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f not found: ID does not exist" containerID="bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.463917 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f"} err="failed to get container status \"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f\": rpc error: code = NotFound desc = could not find container \"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f\": container with ID starting with bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.464127 4842 scope.go:117] "RemoveContainer" containerID="2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d" Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.465191 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d\": container with ID starting with 2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d not found: ID does not exist" containerID="2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.465249 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d"} err="failed to get container status \"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d\": rpc error: code = NotFound desc = could not find container \"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d\": container with ID starting with 
2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.465276 4842 scope.go:117] "RemoveContainer" containerID="56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25" Nov 11 13:56:59 crc kubenswrapper[4842]: E1111 13:56:59.465532 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25\": container with ID starting with 56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25 not found: ID does not exist" containerID="56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.465558 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25"} err="failed to get container status \"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25\": rpc error: code = NotFound desc = could not find container \"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25\": container with ID starting with 56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25 not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.465580 4842 scope.go:117] "RemoveContainer" containerID="0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.467551 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8"} err="failed to get container status \"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8\": rpc error: code = NotFound desc = could not find container \"0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8\": container with ID starting with 0e7b668c7ffb173ae4fc5c125e5a69344d46659e68fd9b2ac8dfb51e311326b8 not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.467575 4842 scope.go:117] "RemoveContainer" containerID="bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.468593 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f"} err="failed to get container status \"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f\": rpc error: code = NotFound desc = could not find container \"bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f\": container with ID starting with bf00954b99c107feacace85a9ab423d14a83a7e799326981d73c382b7c66056f not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.468618 4842 scope.go:117] "RemoveContainer" containerID="2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.468930 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d"} err="failed to get container status \"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d\": rpc error: code = NotFound desc = could not find container \"2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d\": container with ID starting with 
2fd9dbe64aff161089d163e026bdcba1ec37750ebb648cf6dcee904cfba2a34d not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.468956 4842 scope.go:117] "RemoveContainer" containerID="56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.469276 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25"} err="failed to get container status \"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25\": rpc error: code = NotFound desc = could not find container \"56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25\": container with ID starting with 56850f26fc2def89ac176bf59ae006393d8d7a4b1003d265c57ef1934213fb25 not found: ID does not exist" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524083 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524162 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/05ca3203-5029-40ea-a623-f2bf653c0af2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524244 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524307 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-config\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524332 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524356 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524406 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524436 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/05ca3203-5029-40ea-a623-f2bf653c0af2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524572 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524630 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pwhl\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-kube-api-access-9pwhl\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.524667 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.625810 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626024 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/05ca3203-5029-40ea-a623-f2bf653c0af2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626146 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626229 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pwhl\" (UniqueName: 
\"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-kube-api-access-9pwhl\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626302 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626429 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626520 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/05ca3203-5029-40ea-a623-f2bf653c0af2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626621 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626715 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-config\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626786 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626852 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.626908 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/05ca3203-5029-40ea-a623-f2bf653c0af2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc 
kubenswrapper[4842]: I1111 13:56:59.628525 4842 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.628554 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f6d1d4d77a4f1fd1afa0791ead5af16f820dac5d1fa2885ec7edd11054a9ebc3/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.630451 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.631210 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.631215 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/05ca3203-5029-40ea-a623-f2bf653c0af2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.631459 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.631942 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.632092 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.632221 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-config\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " 
pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.632402 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.645890 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pwhl\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-kube-api-access-9pwhl\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.670075 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " pod="openstack/prometheus-metric-storage-0" Nov 11 13:56:59 crc kubenswrapper[4842]: I1111 13:56:59.740035 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 13:57:00 crc kubenswrapper[4842]: I1111 13:57:00.069675 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="218296f7-79b4-47ed-93e7-e0cac5ee935d" path="/var/lib/kubelet/pods/218296f7-79b4-47ed-93e7-e0cac5ee935d/volumes" Nov 11 13:57:00 crc kubenswrapper[4842]: I1111 13:57:00.211583 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 13:57:00 crc kubenswrapper[4842]: W1111 13:57:00.215455 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05ca3203_5029_40ea_a623_f2bf653c0af2.slice/crio-76cd89d997b20925b27b9326d48b1f998630fb436dc0e0266fdaeb19814ad7e1 WatchSource:0}: Error finding container 76cd89d997b20925b27b9326d48b1f998630fb436dc0e0266fdaeb19814ad7e1: Status 404 returned error can't find the container with id 76cd89d997b20925b27b9326d48b1f998630fb436dc0e0266fdaeb19814ad7e1 Nov 11 13:57:00 crc kubenswrapper[4842]: I1111 13:57:00.353760 4842 generic.go:334] "Generic (PLEG): container finished" podID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerID="f5f3b1e3a7ec344316a61312deae227ba34e05a1769cde998ee13a09a2b848ac" exitCode=0 Nov 11 13:57:00 crc kubenswrapper[4842]: I1111 13:57:00.354255 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447565c-vnzdx" event={"ID":"fc8b995c-8577-436c-8341-2cc8e9094a10","Type":"ContainerDied","Data":"f5f3b1e3a7ec344316a61312deae227ba34e05a1769cde998ee13a09a2b848ac"} Nov 11 13:57:00 crc kubenswrapper[4842]: I1111 13:57:00.354279 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447565c-vnzdx" event={"ID":"fc8b995c-8577-436c-8341-2cc8e9094a10","Type":"ContainerStarted","Data":"3ccb40a85241aa495253d6d58ee32bb0256be3f4a05441fafb7f863c38af2c0d"} Nov 11 13:57:00 crc kubenswrapper[4842]: I1111 13:57:00.363400 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerStarted","Data":"76cd89d997b20925b27b9326d48b1f998630fb436dc0e0266fdaeb19814ad7e1"} Nov 11 13:57:01 crc kubenswrapper[4842]: I1111 13:57:01.372560 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447565c-vnzdx" event={"ID":"fc8b995c-8577-436c-8341-2cc8e9094a10","Type":"ContainerStarted","Data":"7a4f4b51854c2c8394054164cf43ea1aae702f769713b59232dc15735f0ac776"} Nov 11 13:57:01 crc kubenswrapper[4842]: I1111 13:57:01.372724 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:57:01 crc kubenswrapper[4842]: I1111 13:57:01.391792 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8447565c-vnzdx" podStartSLOduration=3.391773498 podStartE2EDuration="3.391773498s" podCreationTimestamp="2025-11-11 13:56:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:01.387876615 +0000 UTC m=+1632.048166244" watchObservedRunningTime="2025-11-11 13:57:01.391773498 +0000 UTC m=+1632.052063137" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.184135 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.391962 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerStarted","Data":"d2b3e38a69dea58af8fe45e7ba25d0aa771a93d5d3323387829134ce78fc8a85"} Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.462319 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-fnhpk"] Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.463415 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-fnhpk" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.485911 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lj574\" (UniqueName: \"kubernetes.io/projected/ecc207bf-4706-4b38-9695-ab6ca646eac7-kube-api-access-lj574\") pod \"cinder-db-create-fnhpk\" (UID: \"ecc207bf-4706-4b38-9695-ab6ca646eac7\") " pod="openstack/cinder-db-create-fnhpk" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.510274 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.526196 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-fnhpk"] Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.576240 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-9wc72"] Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.577584 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-9wc72" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.587728 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lj574\" (UniqueName: \"kubernetes.io/projected/ecc207bf-4706-4b38-9695-ab6ca646eac7-kube-api-access-lj574\") pod \"cinder-db-create-fnhpk\" (UID: \"ecc207bf-4706-4b38-9695-ab6ca646eac7\") " pod="openstack/cinder-db-create-fnhpk" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.587778 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md6w8\" (UniqueName: \"kubernetes.io/projected/3302c38d-b1ae-4032-9ede-f26e1de76fc8-kube-api-access-md6w8\") pod \"barbican-db-create-9wc72\" (UID: \"3302c38d-b1ae-4032-9ede-f26e1de76fc8\") " pod="openstack/barbican-db-create-9wc72" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.591360 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-9wc72"] Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.613173 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lj574\" (UniqueName: \"kubernetes.io/projected/ecc207bf-4706-4b38-9695-ab6ca646eac7-kube-api-access-lj574\") pod \"cinder-db-create-fnhpk\" (UID: \"ecc207bf-4706-4b38-9695-ab6ca646eac7\") " pod="openstack/cinder-db-create-fnhpk" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.689534 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md6w8\" (UniqueName: \"kubernetes.io/projected/3302c38d-b1ae-4032-9ede-f26e1de76fc8-kube-api-access-md6w8\") pod \"barbican-db-create-9wc72\" (UID: \"3302c38d-b1ae-4032-9ede-f26e1de76fc8\") " pod="openstack/barbican-db-create-9wc72" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.707828 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md6w8\" (UniqueName: \"kubernetes.io/projected/3302c38d-b1ae-4032-9ede-f26e1de76fc8-kube-api-access-md6w8\") pod \"barbican-db-create-9wc72\" (UID: \"3302c38d-b1ae-4032-9ede-f26e1de76fc8\") " pod="openstack/barbican-db-create-9wc72" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.784125 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-fnhpk" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.842678 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-notifications-server-0" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.895466 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-9wc72" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.924614 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-7nbtt"] Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.929849 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.932803 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rxh4x" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.932915 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.933178 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.933681 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 11 13:57:03 crc kubenswrapper[4842]: I1111 13:57:03.959511 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-7nbtt"] Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.099819 4842 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod2ceb7f5c-d310-4e14-87d0-bad5a01e5e92"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod2ceb7f5c-d310-4e14-87d0-bad5a01e5e92] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2ceb7f5c_d310_4e14_87d0_bad5a01e5e92.slice" Nov 11 13:57:04 crc kubenswrapper[4842]: E1111 13:57:04.099878 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod2ceb7f5c-d310-4e14-87d0-bad5a01e5e92] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod2ceb7f5c-d310-4e14-87d0-bad5a01e5e92] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2ceb7f5c_d310_4e14_87d0_bad5a01e5e92.slice" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.101822 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl9pb\" (UniqueName: \"kubernetes.io/projected/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-kube-api-access-tl9pb\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.101873 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-config-data\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.101923 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-combined-ca-bundle\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.205299 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl9pb\" (UniqueName: \"kubernetes.io/projected/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-kube-api-access-tl9pb\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.205357 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-config-data\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.205414 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-combined-ca-bundle\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.210926 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-config-data\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.211878 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-combined-ca-bundle\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.225073 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl9pb\" (UniqueName: \"kubernetes.io/projected/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-kube-api-access-tl9pb\") pod \"keystone-db-sync-7nbtt\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.320309 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.326868 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-fnhpk"] Nov 11 13:57:04 crc kubenswrapper[4842]: W1111 13:57:04.345492 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podecc207bf_4706_4b38_9695_ab6ca646eac7.slice/crio-1414ee9883b148c0ff4ac7376ac58b62c2455c28dafcd764f47622d3c230036e WatchSource:0}: Error finding container 1414ee9883b148c0ff4ac7376ac58b62c2455c28dafcd764f47622d3c230036e: Status 404 returned error can't find the container with id 1414ee9883b148c0ff4ac7376ac58b62c2455c28dafcd764f47622d3c230036e Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.404351 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cfdc9455f-6kxnv" Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.404986 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-fnhpk" event={"ID":"ecc207bf-4706-4b38-9695-ab6ca646eac7","Type":"ContainerStarted","Data":"1414ee9883b148c0ff4ac7376ac58b62c2455c28dafcd764f47622d3c230036e"} Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.453912 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cfdc9455f-6kxnv"] Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.462822 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cfdc9455f-6kxnv"] Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.469795 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-9wc72"] Nov 11 13:57:04 crc kubenswrapper[4842]: I1111 13:57:04.782548 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-7nbtt"] Nov 11 13:57:04 crc kubenswrapper[4842]: W1111 13:57:04.830064 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b592cf0_e1b9_4e74_8018_f244f2fc25b1.slice/crio-4db9afa8b6f8143cb1565a5eb1453542f7e1df13b79ed3e75a074f6431a5d778 WatchSource:0}: Error finding container 4db9afa8b6f8143cb1565a5eb1453542f7e1df13b79ed3e75a074f6431a5d778: Status 404 returned error can't find the container with id 4db9afa8b6f8143cb1565a5eb1453542f7e1df13b79ed3e75a074f6431a5d778 Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.414557 4842 generic.go:334] "Generic (PLEG): container finished" podID="3302c38d-b1ae-4032-9ede-f26e1de76fc8" containerID="29a09d93f9292afd2540589ae3d2824ce0b44982621c84543a389a652dd954ab" exitCode=0 Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.414644 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-9wc72" event={"ID":"3302c38d-b1ae-4032-9ede-f26e1de76fc8","Type":"ContainerDied","Data":"29a09d93f9292afd2540589ae3d2824ce0b44982621c84543a389a652dd954ab"} Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.414677 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-9wc72" event={"ID":"3302c38d-b1ae-4032-9ede-f26e1de76fc8","Type":"ContainerStarted","Data":"c83fe3d97f16f8cfdd2a68ca3add816c303219b9f757dc7ee620d3730cdff4ca"} Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.420524 4842 generic.go:334] "Generic (PLEG): container finished" podID="ecc207bf-4706-4b38-9695-ab6ca646eac7" containerID="7a880cb00b085135c48fa1646e350aef6a9601c1aaa7e47e1dc2e4c580fabeaf" exitCode=0 Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.420685 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-fnhpk" event={"ID":"ecc207bf-4706-4b38-9695-ab6ca646eac7","Type":"ContainerDied","Data":"7a880cb00b085135c48fa1646e350aef6a9601c1aaa7e47e1dc2e4c580fabeaf"} Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.421600 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7nbtt" event={"ID":"6b592cf0-e1b9-4e74-8018-f244f2fc25b1","Type":"ContainerStarted","Data":"4db9afa8b6f8143cb1565a5eb1453542f7e1df13b79ed3e75a074f6431a5d778"} Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.955649 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-lcspd"] Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.956872 4842 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.959303 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-ctgrf" Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.959358 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Nov 11 13:57:05 crc kubenswrapper[4842]: I1111 13:57:05.968232 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-lcspd"] Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.009324 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5cxmb"] Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.010600 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5cxmb" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.021619 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5cxmb"] Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.042397 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-db-sync-config-data\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.042441 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-config-data\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.042538 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9dnt\" (UniqueName: \"kubernetes.io/projected/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-kube-api-access-r9dnt\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.042562 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-combined-ca-bundle\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.072536 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ceb7f5c-d310-4e14-87d0-bad5a01e5e92" path="/var/lib/kubelet/pods/2ceb7f5c-d310-4e14-87d0-bad5a01e5e92/volumes" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.144268 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-combined-ca-bundle\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.144624 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-db-sync-config-data\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.144646 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-config-data\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.144733 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4t4g\" (UniqueName: \"kubernetes.io/projected/36bca97a-f661-4ba9-8ed0-0a0f38a2f64d-kube-api-access-d4t4g\") pod \"glance-db-create-5cxmb\" (UID: \"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d\") " pod="openstack/glance-db-create-5cxmb" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.144800 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9dnt\" (UniqueName: \"kubernetes.io/projected/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-kube-api-access-r9dnt\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.151932 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-db-sync-config-data\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.152201 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-combined-ca-bundle\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.176918 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9dnt\" (UniqueName: \"kubernetes.io/projected/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-kube-api-access-r9dnt\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.180414 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-config-data\") pod \"watcher-db-sync-lcspd\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.198011 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-qsmk8"] Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.199409 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-qsmk8" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.213727 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qsmk8"] Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.247127 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4t4g\" (UniqueName: \"kubernetes.io/projected/36bca97a-f661-4ba9-8ed0-0a0f38a2f64d-kube-api-access-d4t4g\") pod \"glance-db-create-5cxmb\" (UID: \"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d\") " pod="openstack/glance-db-create-5cxmb" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.265172 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4t4g\" (UniqueName: \"kubernetes.io/projected/36bca97a-f661-4ba9-8ed0-0a0f38a2f64d-kube-api-access-d4t4g\") pod \"glance-db-create-5cxmb\" (UID: \"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d\") " pod="openstack/glance-db-create-5cxmb" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.277355 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.327820 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5cxmb" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.352686 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld55f\" (UniqueName: \"kubernetes.io/projected/765495ea-98a3-43ba-9a52-9cb05487cbb5-kube-api-access-ld55f\") pod \"neutron-db-create-qsmk8\" (UID: \"765495ea-98a3-43ba-9a52-9cb05487cbb5\") " pod="openstack/neutron-db-create-qsmk8" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.456382 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld55f\" (UniqueName: \"kubernetes.io/projected/765495ea-98a3-43ba-9a52-9cb05487cbb5-kube-api-access-ld55f\") pod \"neutron-db-create-qsmk8\" (UID: \"765495ea-98a3-43ba-9a52-9cb05487cbb5\") " pod="openstack/neutron-db-create-qsmk8" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.489046 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld55f\" (UniqueName: \"kubernetes.io/projected/765495ea-98a3-43ba-9a52-9cb05487cbb5-kube-api-access-ld55f\") pod \"neutron-db-create-qsmk8\" (UID: \"765495ea-98a3-43ba-9a52-9cb05487cbb5\") " pod="openstack/neutron-db-create-qsmk8" Nov 11 13:57:06 crc kubenswrapper[4842]: I1111 13:57:06.540228 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-qsmk8" Nov 11 13:57:07 crc kubenswrapper[4842]: I1111 13:57:07.066251 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5cxmb"] Nov 11 13:57:07 crc kubenswrapper[4842]: I1111 13:57:07.083296 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-lcspd"] Nov 11 13:57:07 crc kubenswrapper[4842]: I1111 13:57:07.227359 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qsmk8"] Nov 11 13:57:08 crc kubenswrapper[4842]: I1111 13:57:08.969253 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.034167 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-788ccdd4d5-nwmgv"] Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.034439 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" podUID="36131c9c-3736-4825-85bc-27645ca80178" containerName="dnsmasq-dns" containerID="cri-o://85e8393e9569a57963eb62a7fc2789cc78f00ec82ee42f10daa4218d20ab55a1" gracePeriod=10 Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.463072 4842 generic.go:334] "Generic (PLEG): container finished" podID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerID="d2b3e38a69dea58af8fe45e7ba25d0aa771a93d5d3323387829134ce78fc8a85" exitCode=0 Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.463135 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerDied","Data":"d2b3e38a69dea58af8fe45e7ba25d0aa771a93d5d3323387829134ce78fc8a85"} Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.465753 4842 generic.go:334] "Generic (PLEG): container finished" podID="36131c9c-3736-4825-85bc-27645ca80178" containerID="85e8393e9569a57963eb62a7fc2789cc78f00ec82ee42f10daa4218d20ab55a1" exitCode=0 Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.465784 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" event={"ID":"36131c9c-3736-4825-85bc-27645ca80178","Type":"ContainerDied","Data":"85e8393e9569a57963eb62a7fc2789cc78f00ec82ee42f10daa4218d20ab55a1"} Nov 11 13:57:09 crc kubenswrapper[4842]: W1111 13:57:09.574291 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb467d4be_8e0e_43a9_b2a7_51cc22c11e25.slice/crio-ca92e43cb5243e0a2e1226385fd496971df30437b5785b1f6286caf87d190254 WatchSource:0}: Error finding container ca92e43cb5243e0a2e1226385fd496971df30437b5785b1f6286caf87d190254: Status 404 returned error can't find the container with id ca92e43cb5243e0a2e1226385fd496971df30437b5785b1f6286caf87d190254 Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.800339 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-9wc72" Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.850915 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-fnhpk" Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.914898 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.945242 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lj574\" (UniqueName: \"kubernetes.io/projected/ecc207bf-4706-4b38-9695-ab6ca646eac7-kube-api-access-lj574\") pod \"ecc207bf-4706-4b38-9695-ab6ca646eac7\" (UID: \"ecc207bf-4706-4b38-9695-ab6ca646eac7\") " Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.945486 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-md6w8\" (UniqueName: \"kubernetes.io/projected/3302c38d-b1ae-4032-9ede-f26e1de76fc8-kube-api-access-md6w8\") pod \"3302c38d-b1ae-4032-9ede-f26e1de76fc8\" (UID: \"3302c38d-b1ae-4032-9ede-f26e1de76fc8\") " Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.951645 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3302c38d-b1ae-4032-9ede-f26e1de76fc8-kube-api-access-md6w8" (OuterVolumeSpecName: "kube-api-access-md6w8") pod "3302c38d-b1ae-4032-9ede-f26e1de76fc8" (UID: "3302c38d-b1ae-4032-9ede-f26e1de76fc8"). InnerVolumeSpecName "kube-api-access-md6w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:09 crc kubenswrapper[4842]: I1111 13:57:09.954505 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecc207bf-4706-4b38-9695-ab6ca646eac7-kube-api-access-lj574" (OuterVolumeSpecName: "kube-api-access-lj574") pod "ecc207bf-4706-4b38-9695-ab6ca646eac7" (UID: "ecc207bf-4706-4b38-9695-ab6ca646eac7"). InnerVolumeSpecName "kube-api-access-lj574". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.047359 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z2m7\" (UniqueName: \"kubernetes.io/projected/36131c9c-3736-4825-85bc-27645ca80178-kube-api-access-7z2m7\") pod \"36131c9c-3736-4825-85bc-27645ca80178\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.047479 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-dns-svc\") pod \"36131c9c-3736-4825-85bc-27645ca80178\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.047642 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-sb\") pod \"36131c9c-3736-4825-85bc-27645ca80178\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.047668 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-nb\") pod \"36131c9c-3736-4825-85bc-27645ca80178\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.047685 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-config\") pod \"36131c9c-3736-4825-85bc-27645ca80178\" (UID: \"36131c9c-3736-4825-85bc-27645ca80178\") " Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.048175 4842 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-md6w8\" (UniqueName: \"kubernetes.io/projected/3302c38d-b1ae-4032-9ede-f26e1de76fc8-kube-api-access-md6w8\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.048194 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lj574\" (UniqueName: \"kubernetes.io/projected/ecc207bf-4706-4b38-9695-ab6ca646eac7-kube-api-access-lj574\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.052538 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36131c9c-3736-4825-85bc-27645ca80178-kube-api-access-7z2m7" (OuterVolumeSpecName: "kube-api-access-7z2m7") pod "36131c9c-3736-4825-85bc-27645ca80178" (UID: "36131c9c-3736-4825-85bc-27645ca80178"). InnerVolumeSpecName "kube-api-access-7z2m7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.099409 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-config" (OuterVolumeSpecName: "config") pod "36131c9c-3736-4825-85bc-27645ca80178" (UID: "36131c9c-3736-4825-85bc-27645ca80178"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.103650 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "36131c9c-3736-4825-85bc-27645ca80178" (UID: "36131c9c-3736-4825-85bc-27645ca80178"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.121071 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "36131c9c-3736-4825-85bc-27645ca80178" (UID: "36131c9c-3736-4825-85bc-27645ca80178"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.121075 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "36131c9c-3736-4825-85bc-27645ca80178" (UID: "36131c9c-3736-4825-85bc-27645ca80178"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.150020 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.150057 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.150070 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.150083 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z2m7\" (UniqueName: \"kubernetes.io/projected/36131c9c-3736-4825-85bc-27645ca80178-kube-api-access-7z2m7\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.150120 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36131c9c-3736-4825-85bc-27645ca80178-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.480964 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-fnhpk" event={"ID":"ecc207bf-4706-4b38-9695-ab6ca646eac7","Type":"ContainerDied","Data":"1414ee9883b148c0ff4ac7376ac58b62c2455c28dafcd764f47622d3c230036e"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.481306 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1414ee9883b148c0ff4ac7376ac58b62c2455c28dafcd764f47622d3c230036e" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.480972 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-fnhpk" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.482844 4842 generic.go:334] "Generic (PLEG): container finished" podID="765495ea-98a3-43ba-9a52-9cb05487cbb5" containerID="d1ee2e5d7242d98238ecc29642dd08f4f110b59935e67f0fe3ec88a5f3029ee4" exitCode=0 Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.482909 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qsmk8" event={"ID":"765495ea-98a3-43ba-9a52-9cb05487cbb5","Type":"ContainerDied","Data":"d1ee2e5d7242d98238ecc29642dd08f4f110b59935e67f0fe3ec88a5f3029ee4"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.482936 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qsmk8" event={"ID":"765495ea-98a3-43ba-9a52-9cb05487cbb5","Type":"ContainerStarted","Data":"0f487b9d13fbeccc165e8afb51511186e0f4ad2f3e647811ed2ffca28c2f2832"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.483904 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-lcspd" event={"ID":"b467d4be-8e0e-43a9-b2a7-51cc22c11e25","Type":"ContainerStarted","Data":"ca92e43cb5243e0a2e1226385fd496971df30437b5785b1f6286caf87d190254"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.486319 4842 generic.go:334] "Generic (PLEG): container finished" podID="36bca97a-f661-4ba9-8ed0-0a0f38a2f64d" containerID="0f82b024549ee83a881868bd2c19b000a7cffa515c0eb3408d9f7786124fe2b5" exitCode=0 Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.486391 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5cxmb" event={"ID":"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d","Type":"ContainerDied","Data":"0f82b024549ee83a881868bd2c19b000a7cffa515c0eb3408d9f7786124fe2b5"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.486425 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5cxmb" event={"ID":"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d","Type":"ContainerStarted","Data":"59f8624cd8e39c82dd96961ced8d1d218ad0fa894351a1c127c2e215e53ed074"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.488325 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7nbtt" event={"ID":"6b592cf0-e1b9-4e74-8018-f244f2fc25b1","Type":"ContainerStarted","Data":"faa4e5ef9ae5cc48bc5f78a037df77050cf327e7ebc298de016ab0097c705310"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.490598 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-9wc72" event={"ID":"3302c38d-b1ae-4032-9ede-f26e1de76fc8","Type":"ContainerDied","Data":"c83fe3d97f16f8cfdd2a68ca3add816c303219b9f757dc7ee620d3730cdff4ca"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.490646 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c83fe3d97f16f8cfdd2a68ca3add816c303219b9f757dc7ee620d3730cdff4ca" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.490697 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-9wc72" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.498121 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerStarted","Data":"f6860d6673ae88b1d5b2865d3976e9d52bb6733abf0b594b74401cf811c28168"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.501360 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" event={"ID":"36131c9c-3736-4825-85bc-27645ca80178","Type":"ContainerDied","Data":"6b5caf79d385e518c1e40dd96fbaa4514b8941b47ae670fb2afe9735af43bcc0"} Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.501406 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.501414 4842 scope.go:117] "RemoveContainer" containerID="85e8393e9569a57963eb62a7fc2789cc78f00ec82ee42f10daa4218d20ab55a1" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.542674 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-7nbtt" podStartSLOduration=2.72789292 podStartE2EDuration="7.542653649s" podCreationTimestamp="2025-11-11 13:57:03 +0000 UTC" firstStartedPulling="2025-11-11 13:57:04.833550732 +0000 UTC m=+1635.493840351" lastFinishedPulling="2025-11-11 13:57:09.648311461 +0000 UTC m=+1640.308601080" observedRunningTime="2025-11-11 13:57:10.518193362 +0000 UTC m=+1641.178482981" watchObservedRunningTime="2025-11-11 13:57:10.542653649 +0000 UTC m=+1641.202943278" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.547855 4842 scope.go:117] "RemoveContainer" containerID="c5ce13bc413677b767e22ec75fc3d7ca4a379461549148979323f837b0a4ac6d" Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.563515 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-788ccdd4d5-nwmgv"] Nov 11 13:57:10 crc kubenswrapper[4842]: I1111 13:57:10.575606 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-788ccdd4d5-nwmgv"] Nov 11 13:57:10 crc kubenswrapper[4842]: E1111 13:57:10.701672 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36131c9c_3736_4825_85bc_27645ca80178.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36131c9c_3736_4825_85bc_27645ca80178.slice/crio-6b5caf79d385e518c1e40dd96fbaa4514b8941b47ae670fb2afe9735af43bcc0\": RecentStats: unable to find data in memory cache]" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.070909 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36131c9c-3736-4825-85bc-27645ca80178" path="/var/lib/kubelet/pods/36131c9c-3736-4825-85bc-27645ca80178/volumes" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.195215 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qsmk8" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.202715 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5cxmb" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.302800 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4t4g\" (UniqueName: \"kubernetes.io/projected/36bca97a-f661-4ba9-8ed0-0a0f38a2f64d-kube-api-access-d4t4g\") pod \"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d\" (UID: \"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d\") " Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.302869 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ld55f\" (UniqueName: \"kubernetes.io/projected/765495ea-98a3-43ba-9a52-9cb05487cbb5-kube-api-access-ld55f\") pod \"765495ea-98a3-43ba-9a52-9cb05487cbb5\" (UID: \"765495ea-98a3-43ba-9a52-9cb05487cbb5\") " Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.321916 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36bca97a-f661-4ba9-8ed0-0a0f38a2f64d-kube-api-access-d4t4g" (OuterVolumeSpecName: "kube-api-access-d4t4g") pod "36bca97a-f661-4ba9-8ed0-0a0f38a2f64d" (UID: "36bca97a-f661-4ba9-8ed0-0a0f38a2f64d"). InnerVolumeSpecName "kube-api-access-d4t4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.322310 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/765495ea-98a3-43ba-9a52-9cb05487cbb5-kube-api-access-ld55f" (OuterVolumeSpecName: "kube-api-access-ld55f") pod "765495ea-98a3-43ba-9a52-9cb05487cbb5" (UID: "765495ea-98a3-43ba-9a52-9cb05487cbb5"). InnerVolumeSpecName "kube-api-access-ld55f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.404217 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4t4g\" (UniqueName: \"kubernetes.io/projected/36bca97a-f661-4ba9-8ed0-0a0f38a2f64d-kube-api-access-d4t4g\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.404255 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ld55f\" (UniqueName: \"kubernetes.io/projected/765495ea-98a3-43ba-9a52-9cb05487cbb5-kube-api-access-ld55f\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.523657 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5cxmb" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.523655 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5cxmb" event={"ID":"36bca97a-f661-4ba9-8ed0-0a0f38a2f64d","Type":"ContainerDied","Data":"59f8624cd8e39c82dd96961ced8d1d218ad0fa894351a1c127c2e215e53ed074"} Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.524128 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="59f8624cd8e39c82dd96961ced8d1d218ad0fa894351a1c127c2e215e53ed074" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.526675 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerStarted","Data":"3f2ab27f69be57f3b89d410b3aafeb0f9f25b4ddfe1983376d0eb85cbc50270c"} Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.528607 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qsmk8" event={"ID":"765495ea-98a3-43ba-9a52-9cb05487cbb5","Type":"ContainerDied","Data":"0f487b9d13fbeccc165e8afb51511186e0f4ad2f3e647811ed2ffca28c2f2832"} Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.528631 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f487b9d13fbeccc165e8afb51511186e0f4ad2f3e647811ed2ffca28c2f2832" Nov 11 13:57:12 crc kubenswrapper[4842]: I1111 13:57:12.528633 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qsmk8" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515193 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-ed65-account-create-h4xrz"] Nov 11 13:57:13 crc kubenswrapper[4842]: E1111 13:57:13.515608 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="765495ea-98a3-43ba-9a52-9cb05487cbb5" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515625 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="765495ea-98a3-43ba-9a52-9cb05487cbb5" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: E1111 13:57:13.515651 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecc207bf-4706-4b38-9695-ab6ca646eac7" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515660 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecc207bf-4706-4b38-9695-ab6ca646eac7" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: E1111 13:57:13.515671 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36131c9c-3736-4825-85bc-27645ca80178" containerName="dnsmasq-dns" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515680 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="36131c9c-3736-4825-85bc-27645ca80178" containerName="dnsmasq-dns" Nov 11 13:57:13 crc kubenswrapper[4842]: E1111 13:57:13.515693 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36131c9c-3736-4825-85bc-27645ca80178" containerName="init" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515700 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="36131c9c-3736-4825-85bc-27645ca80178" containerName="init" Nov 11 13:57:13 crc kubenswrapper[4842]: E1111 13:57:13.515721 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3302c38d-b1ae-4032-9ede-f26e1de76fc8" 
containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515728 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3302c38d-b1ae-4032-9ede-f26e1de76fc8" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: E1111 13:57:13.515750 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36bca97a-f661-4ba9-8ed0-0a0f38a2f64d" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515782 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="36bca97a-f661-4ba9-8ed0-0a0f38a2f64d" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.515989 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="36131c9c-3736-4825-85bc-27645ca80178" containerName="dnsmasq-dns" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.516008 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="36bca97a-f661-4ba9-8ed0-0a0f38a2f64d" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.516027 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecc207bf-4706-4b38-9695-ab6ca646eac7" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.516054 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="765495ea-98a3-43ba-9a52-9cb05487cbb5" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.516073 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="3302c38d-b1ae-4032-9ede-f26e1de76fc8" containerName="mariadb-database-create" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.516819 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-ed65-account-create-h4xrz" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.519991 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.524701 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7bqk\" (UniqueName: \"kubernetes.io/projected/4b25a8db-967f-4a25-8daa-e7734831d0c4-kube-api-access-b7bqk\") pod \"cinder-ed65-account-create-h4xrz\" (UID: \"4b25a8db-967f-4a25-8daa-e7734831d0c4\") " pod="openstack/cinder-ed65-account-create-h4xrz" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.530246 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-ed65-account-create-h4xrz"] Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.626396 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7bqk\" (UniqueName: \"kubernetes.io/projected/4b25a8db-967f-4a25-8daa-e7734831d0c4-kube-api-access-b7bqk\") pod \"cinder-ed65-account-create-h4xrz\" (UID: \"4b25a8db-967f-4a25-8daa-e7734831d0c4\") " pod="openstack/cinder-ed65-account-create-h4xrz" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.645875 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7bqk\" (UniqueName: \"kubernetes.io/projected/4b25a8db-967f-4a25-8daa-e7734831d0c4-kube-api-access-b7bqk\") pod \"cinder-ed65-account-create-h4xrz\" (UID: \"4b25a8db-967f-4a25-8daa-e7734831d0c4\") " pod="openstack/cinder-ed65-account-create-h4xrz" Nov 11 13:57:13 crc kubenswrapper[4842]: I1111 13:57:13.836985 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-ed65-account-create-h4xrz" Nov 11 13:57:14 crc kubenswrapper[4842]: I1111 13:57:14.564248 4842 generic.go:334] "Generic (PLEG): container finished" podID="6b592cf0-e1b9-4e74-8018-f244f2fc25b1" containerID="faa4e5ef9ae5cc48bc5f78a037df77050cf327e7ebc298de016ab0097c705310" exitCode=0 Nov 11 13:57:14 crc kubenswrapper[4842]: I1111 13:57:14.564299 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7nbtt" event={"ID":"6b592cf0-e1b9-4e74-8018-f244f2fc25b1","Type":"ContainerDied","Data":"faa4e5ef9ae5cc48bc5f78a037df77050cf327e7ebc298de016ab0097c705310"} Nov 11 13:57:14 crc kubenswrapper[4842]: I1111 13:57:14.868643 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-788ccdd4d5-nwmgv" podUID="36131c9c-3736-4825-85bc-27645ca80178" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: i/o timeout" Nov 11 13:57:14 crc kubenswrapper[4842]: I1111 13:57:14.961460 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:57:14 crc kubenswrapper[4842]: I1111 13:57:14.961551 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.108660 4842 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.171338 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-combined-ca-bundle\") pod \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.171773 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-config-data\") pod \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.171865 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tl9pb\" (UniqueName: \"kubernetes.io/projected/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-kube-api-access-tl9pb\") pod \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\" (UID: \"6b592cf0-e1b9-4e74-8018-f244f2fc25b1\") " Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.179357 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-kube-api-access-tl9pb" (OuterVolumeSpecName: "kube-api-access-tl9pb") pod "6b592cf0-e1b9-4e74-8018-f244f2fc25b1" (UID: "6b592cf0-e1b9-4e74-8018-f244f2fc25b1"). InnerVolumeSpecName "kube-api-access-tl9pb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.209290 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b592cf0-e1b9-4e74-8018-f244f2fc25b1" (UID: "6b592cf0-e1b9-4e74-8018-f244f2fc25b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.222881 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-config-data" (OuterVolumeSpecName: "config-data") pod "6b592cf0-e1b9-4e74-8018-f244f2fc25b1" (UID: "6b592cf0-e1b9-4e74-8018-f244f2fc25b1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.272960 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tl9pb\" (UniqueName: \"kubernetes.io/projected/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-kube-api-access-tl9pb\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.272999 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.273013 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b592cf0-e1b9-4e74-8018-f244f2fc25b1-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.371964 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-ed65-account-create-h4xrz"] Nov 11 13:57:16 crc kubenswrapper[4842]: W1111 13:57:16.375382 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b25a8db_967f_4a25_8daa_e7734831d0c4.slice/crio-532172b228c14a0593ecc178c2af3e81160dd870a33e804892efd7803313de6d WatchSource:0}: Error finding container 532172b228c14a0593ecc178c2af3e81160dd870a33e804892efd7803313de6d: Status 404 returned error can't find the container with id 532172b228c14a0593ecc178c2af3e81160dd870a33e804892efd7803313de6d Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.586954 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerStarted","Data":"d032234bf282adbe08f9d91d7141b25c1a3b2851b0d5bc13405206a923e99d55"} Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.593150 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-ed65-account-create-h4xrz" event={"ID":"4b25a8db-967f-4a25-8daa-e7734831d0c4","Type":"ContainerStarted","Data":"85e1e626291faa7e959429213b1eb0324e21ef79108218870afe406c92109563"} Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.593194 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-ed65-account-create-h4xrz" event={"ID":"4b25a8db-967f-4a25-8daa-e7734831d0c4","Type":"ContainerStarted","Data":"532172b228c14a0593ecc178c2af3e81160dd870a33e804892efd7803313de6d"} Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.595182 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-lcspd" event={"ID":"b467d4be-8e0e-43a9-b2a7-51cc22c11e25","Type":"ContainerStarted","Data":"1d1c9b5432ee57faf6c2ff49e711da754f1e2505992c9f6c8a6eeb6a03d4771d"} Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.596957 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7nbtt" event={"ID":"6b592cf0-e1b9-4e74-8018-f244f2fc25b1","Type":"ContainerDied","Data":"4db9afa8b6f8143cb1565a5eb1453542f7e1df13b79ed3e75a074f6431a5d778"} Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.596978 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4db9afa8b6f8143cb1565a5eb1453542f7e1df13b79ed3e75a074f6431a5d778" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.597024 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-7nbtt" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.619491 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=17.61947368 podStartE2EDuration="17.61947368s" podCreationTimestamp="2025-11-11 13:56:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:16.613987586 +0000 UTC m=+1647.274277225" watchObservedRunningTime="2025-11-11 13:57:16.61947368 +0000 UTC m=+1647.279763299" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.637163 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-lcspd" podStartSLOduration=5.270000321 podStartE2EDuration="11.637137482s" podCreationTimestamp="2025-11-11 13:57:05 +0000 UTC" firstStartedPulling="2025-11-11 13:57:09.603399735 +0000 UTC m=+1640.263689354" lastFinishedPulling="2025-11-11 13:57:15.970536896 +0000 UTC m=+1646.630826515" observedRunningTime="2025-11-11 13:57:16.633267008 +0000 UTC m=+1647.293556627" watchObservedRunningTime="2025-11-11 13:57:16.637137482 +0000 UTC m=+1647.297427101" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.750338 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-898466c99-fxsnm"] Nov 11 13:57:16 crc kubenswrapper[4842]: E1111 13:57:16.750723 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b592cf0-e1b9-4e74-8018-f244f2fc25b1" containerName="keystone-db-sync" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.750734 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b592cf0-e1b9-4e74-8018-f244f2fc25b1" containerName="keystone-db-sync" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.750913 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b592cf0-e1b9-4e74-8018-f244f2fc25b1" containerName="keystone-db-sync" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.751877 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.771442 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-898466c99-fxsnm"] Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.790067 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-bfmrc"] Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.791423 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.796962 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.797171 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.797315 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rxh4x" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.797782 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.839544 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bfmrc"] Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.882512 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-config\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.882638 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-sb\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.882701 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-swift-storage-0\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.882724 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dr7s\" (UniqueName: \"kubernetes.io/projected/36376ecc-1296-4538-ad80-f7777f121c13-kube-api-access-7dr7s\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.882772 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-svc\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.882805 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-nb\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.922417 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7f4d859fc9-zzhzr"] Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.924212 4842 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.927239 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.927430 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.927533 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.927700 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-xvk8n" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.956361 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f4d859fc9-zzhzr"] Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.969799 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.975481 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.978473 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.978709 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984571 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-combined-ca-bundle\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984658 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-config-data\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984701 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-sb\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984726 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-684jh\" (UniqueName: \"kubernetes.io/projected/3b2102e4-d7be-4aa6-8ca4-105bcba66248-kube-api-access-684jh\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984788 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-swift-storage-0\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 
11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984809 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dr7s\" (UniqueName: \"kubernetes.io/projected/36376ecc-1296-4538-ad80-f7777f121c13-kube-api-access-7dr7s\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984837 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-credential-keys\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984868 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-scripts\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984907 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-svc\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984933 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-fernet-keys\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.984967 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-nb\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.985012 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-config\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.986231 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-config\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.986813 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-sb\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.987012 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-svc\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.987749 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-nb\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:16 crc kubenswrapper[4842]: I1111 13:57:16.994564 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-swift-storage-0\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.039012 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dr7s\" (UniqueName: \"kubernetes.io/projected/36376ecc-1296-4538-ad80-f7777f121c13-kube-api-access-7dr7s\") pod \"dnsmasq-dns-898466c99-fxsnm\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.058176 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.085423 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.086457 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6mwg\" (UniqueName: \"kubernetes.io/projected/45a26782-560c-4aa9-b5cc-301abaea98ee-kube-api-access-r6mwg\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.086911 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-credential-keys\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.093775 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-scripts\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.093825 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xp976\" (UniqueName: \"kubernetes.io/projected/d8908777-1ed6-42fa-8642-5c388d9f0b4e-kube-api-access-xp976\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.093877 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-log-httpd\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.093906 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-fernet-keys\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.093942 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-config-data\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094015 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-scripts\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094051 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/45a26782-560c-4aa9-b5cc-301abaea98ee-horizon-secret-key\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094083 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-config-data\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094117 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-combined-ca-bundle\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094165 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094228 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-config-data\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094317 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094383 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45a26782-560c-4aa9-b5cc-301abaea98ee-logs\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094442 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-684jh\" (UniqueName: \"kubernetes.io/projected/3b2102e4-d7be-4aa6-8ca4-105bcba66248-kube-api-access-684jh\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094687 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-scripts\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.094720 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-run-httpd\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.101086 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-combined-ca-bundle\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.101330 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-scripts\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.102592 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-fernet-keys\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.105840 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-86c87df95f-24v9b"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.107761 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.120591 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-config-data\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.132027 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-credential-keys\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.143134 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-684jh\" (UniqueName: \"kubernetes.io/projected/3b2102e4-d7be-4aa6-8ca4-105bcba66248-kube-api-access-684jh\") pod \"keystone-bootstrap-bfmrc\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.165169 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86c87df95f-24v9b"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.199964 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xp976\" (UniqueName: \"kubernetes.io/projected/d8908777-1ed6-42fa-8642-5c388d9f0b4e-kube-api-access-xp976\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200309 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-log-httpd\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200351 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-config-data\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200389 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m5t4\" (UniqueName: \"kubernetes.io/projected/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-kube-api-access-4m5t4\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200412 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-scripts\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200437 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-scripts\") pod \"horizon-86c87df95f-24v9b\" (UID: 
\"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200454 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/45a26782-560c-4aa9-b5cc-301abaea98ee-horizon-secret-key\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200475 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-config-data\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200503 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200533 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200549 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45a26782-560c-4aa9-b5cc-301abaea98ee-logs\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200571 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-horizon-secret-key\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200591 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-scripts\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200607 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-config-data\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200626 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-run-httpd\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200647 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-r6mwg\" (UniqueName: \"kubernetes.io/projected/45a26782-560c-4aa9-b5cc-301abaea98ee-kube-api-access-r6mwg\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.200667 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-logs\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.201308 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-nqmw2"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.201404 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-log-httpd\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.202376 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.204751 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.205048 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.211681 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45a26782-560c-4aa9-b5cc-301abaea98ee-logs\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.211748 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.211784 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-scripts\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.212048 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-ldgr6" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.216545 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-run-httpd\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.217825 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-scripts\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 
11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.222243 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.223783 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-config-data\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.224085 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-config-data\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.239717 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/45a26782-560c-4aa9-b5cc-301abaea98ee-horizon-secret-key\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.250201 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-898466c99-fxsnm"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.251778 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6mwg\" (UniqueName: \"kubernetes.io/projected/45a26782-560c-4aa9-b5cc-301abaea98ee-kube-api-access-r6mwg\") pod \"horizon-7f4d859fc9-zzhzr\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.257008 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xp976\" (UniqueName: \"kubernetes.io/projected/d8908777-1ed6-42fa-8642-5c388d9f0b4e-kube-api-access-xp976\") pod \"ceilometer-0\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.262984 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-nqmw2"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.265680 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.299673 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.305867 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-logs\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.305917 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-scripts\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.305954 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-combined-ca-bundle\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.306026 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-config-data\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.306061 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m5t4\" (UniqueName: \"kubernetes.io/projected/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-kube-api-access-4m5t4\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.306509 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-logs\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.306520 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-scripts\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.306682 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a855b49-006b-47a5-a808-c1c3649473aa-logs\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.306779 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-horizon-secret-key\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.306815 4842 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-config-data\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.307187 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-scripts\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.308156 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-config-data\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.308221 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbb6n\" (UniqueName: \"kubernetes.io/projected/8a855b49-006b-47a5-a808-c1c3649473aa-kube-api-access-kbb6n\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.310764 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85d7795cdc-rprf5"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.312860 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.313894 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-horizon-secret-key\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.331886 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m5t4\" (UniqueName: \"kubernetes.io/projected/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-kube-api-access-4m5t4\") pod \"horizon-86c87df95f-24v9b\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.361058 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85d7795cdc-rprf5"] Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.410924 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-svc\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411359 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a855b49-006b-47a5-a808-c1c3649473aa-logs\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411390 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-config\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411460 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbb6n\" (UniqueName: \"kubernetes.io/projected/8a855b49-006b-47a5-a808-c1c3649473aa-kube-api-access-kbb6n\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411511 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-scripts\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411541 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-combined-ca-bundle\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411568 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-nb\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411622 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-config-data\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411656 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6v44\" (UniqueName: \"kubernetes.io/projected/d47bc4a2-0636-4a23-b904-4ca118e84e05-kube-api-access-k6v44\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411691 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-swift-storage-0\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411717 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-sb\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.411938 
4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a855b49-006b-47a5-a808-c1c3649473aa-logs\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.415856 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.429562 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-combined-ca-bundle\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.429685 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-config-data\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.432723 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-scripts\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.437223 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbb6n\" (UniqueName: \"kubernetes.io/projected/8a855b49-006b-47a5-a808-c1c3649473aa-kube-api-access-kbb6n\") pod \"placement-db-sync-nqmw2\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.512701 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6v44\" (UniqueName: \"kubernetes.io/projected/d47bc4a2-0636-4a23-b904-4ca118e84e05-kube-api-access-k6v44\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.512758 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-swift-storage-0\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.512783 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-sb\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.512814 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-svc\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: 
I1111 13:57:17.512836 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-config\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.512894 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-nb\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.513668 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-nb\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.515676 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-svc\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.515778 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-swift-storage-0\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.516293 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-sb\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.516378 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-config\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.551171 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6v44\" (UniqueName: \"kubernetes.io/projected/d47bc4a2-0636-4a23-b904-4ca118e84e05-kube-api-access-k6v44\") pod \"dnsmasq-dns-85d7795cdc-rprf5\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.581471 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.592987 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-nqmw2" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.618215 4842 generic.go:334] "Generic (PLEG): container finished" podID="4b25a8db-967f-4a25-8daa-e7734831d0c4" containerID="85e1e626291faa7e959429213b1eb0324e21ef79108218870afe406c92109563" exitCode=0 Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.619336 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-ed65-account-create-h4xrz" event={"ID":"4b25a8db-967f-4a25-8daa-e7734831d0c4","Type":"ContainerDied","Data":"85e1e626291faa7e959429213b1eb0324e21ef79108218870afe406c92109563"} Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.642881 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:17 crc kubenswrapper[4842]: I1111 13:57:17.741060 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-898466c99-fxsnm"] Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.184697 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f4d859fc9-zzhzr"] Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.334753 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bfmrc"] Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.365389 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.637228 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4d859fc9-zzhzr" event={"ID":"45a26782-560c-4aa9-b5cc-301abaea98ee","Type":"ContainerStarted","Data":"981935b9d9e72e03df96b39318818c2a1e3699c2df177aff5353aeb4381ddf0a"} Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.642238 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-898466c99-fxsnm" event={"ID":"36376ecc-1296-4538-ad80-f7777f121c13","Type":"ContainerStarted","Data":"7771111df243c731819c4315c5dd7d3d5cc462647de3631c6abb3bcba6827350"} Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.644064 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bfmrc" event={"ID":"3b2102e4-d7be-4aa6-8ca4-105bcba66248","Type":"ContainerStarted","Data":"3448c11620ab504af5ea8d4107da4870017b08722bc3e645ba10bdd5de0a31c7"} Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.651602 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8908777-1ed6-42fa-8642-5c388d9f0b4e","Type":"ContainerStarted","Data":"8d6c0e29dc7f3ec01ce6f8279828863ea0e1eb73eb9a1f66a50a86d3b8b684d9"} Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.680540 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-nqmw2"] Nov 11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.693524 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85d7795cdc-rprf5"] Nov 11 13:57:18 crc kubenswrapper[4842]: W1111 13:57:18.731126 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c8ef126_60a0_4a3d_8d1f_8207a8fa684f.slice/crio-6dc6e03e55fc2e7a6450c6d71c9edb4eca8bb792de82fe1af6ce316aa64c21cf WatchSource:0}: Error finding container 6dc6e03e55fc2e7a6450c6d71c9edb4eca8bb792de82fe1af6ce316aa64c21cf: Status 404 returned error can't find the container with id 6dc6e03e55fc2e7a6450c6d71c9edb4eca8bb792de82fe1af6ce316aa64c21cf Nov 
11 13:57:18 crc kubenswrapper[4842]: I1111 13:57:18.734881 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86c87df95f-24v9b"] Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.090988 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-ed65-account-create-h4xrz" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.271123 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7bqk\" (UniqueName: \"kubernetes.io/projected/4b25a8db-967f-4a25-8daa-e7734831d0c4-kube-api-access-b7bqk\") pod \"4b25a8db-967f-4a25-8daa-e7734831d0c4\" (UID: \"4b25a8db-967f-4a25-8daa-e7734831d0c4\") " Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.292389 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b25a8db-967f-4a25-8daa-e7734831d0c4-kube-api-access-b7bqk" (OuterVolumeSpecName: "kube-api-access-b7bqk") pod "4b25a8db-967f-4a25-8daa-e7734831d0c4" (UID: "4b25a8db-967f-4a25-8daa-e7734831d0c4"). InnerVolumeSpecName "kube-api-access-b7bqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.325295 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f4d859fc9-zzhzr"] Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.331933 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.363045 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6c86bb574c-djp9j"] Nov 11 13:57:19 crc kubenswrapper[4842]: E1111 13:57:19.363401 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b25a8db-967f-4a25-8daa-e7734831d0c4" containerName="mariadb-account-create" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.363413 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b25a8db-967f-4a25-8daa-e7734831d0c4" containerName="mariadb-account-create" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.363596 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b25a8db-967f-4a25-8daa-e7734831d0c4" containerName="mariadb-account-create" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.384532 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7bqk\" (UniqueName: \"kubernetes.io/projected/4b25a8db-967f-4a25-8daa-e7734831d0c4-kube-api-access-b7bqk\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.398717 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.412456 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c86bb574c-djp9j"] Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.486333 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc9hp\" (UniqueName: \"kubernetes.io/projected/b9691e8b-e9db-41cf-b455-a3a9219b2d56-kube-api-access-jc9hp\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.486387 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-scripts\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.486546 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b9691e8b-e9db-41cf-b455-a3a9219b2d56-horizon-secret-key\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.486616 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9691e8b-e9db-41cf-b455-a3a9219b2d56-logs\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.486642 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-config-data\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.588129 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9691e8b-e9db-41cf-b455-a3a9219b2d56-logs\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.588188 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-config-data\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.588260 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc9hp\" (UniqueName: \"kubernetes.io/projected/b9691e8b-e9db-41cf-b455-a3a9219b2d56-kube-api-access-jc9hp\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.588299 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-scripts\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.588388 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b9691e8b-e9db-41cf-b455-a3a9219b2d56-horizon-secret-key\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.588691 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9691e8b-e9db-41cf-b455-a3a9219b2d56-logs\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.589783 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-scripts\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.591092 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-config-data\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.593647 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b9691e8b-e9db-41cf-b455-a3a9219b2d56-horizon-secret-key\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.604706 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc9hp\" (UniqueName: \"kubernetes.io/projected/b9691e8b-e9db-41cf-b455-a3a9219b2d56-kube-api-access-jc9hp\") pod \"horizon-6c86bb574c-djp9j\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.667485 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-ed65-account-create-h4xrz" event={"ID":"4b25a8db-967f-4a25-8daa-e7734831d0c4","Type":"ContainerDied","Data":"532172b228c14a0593ecc178c2af3e81160dd870a33e804892efd7803313de6d"} Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.667808 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="532172b228c14a0593ecc178c2af3e81160dd870a33e804892efd7803313de6d" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.667509 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-ed65-account-create-h4xrz" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.671682 4842 generic.go:334] "Generic (PLEG): container finished" podID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerID="2137bbbb1af3fb8f914150712f4f01562635606d1a7cb5b7e1b72ba405a126b6" exitCode=0 Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.671747 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" event={"ID":"d47bc4a2-0636-4a23-b904-4ca118e84e05","Type":"ContainerDied","Data":"2137bbbb1af3fb8f914150712f4f01562635606d1a7cb5b7e1b72ba405a126b6"} Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.671774 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" event={"ID":"d47bc4a2-0636-4a23-b904-4ca118e84e05","Type":"ContainerStarted","Data":"85889397f38b61dc9a2eebf4ddd9c65a6a2769cb833ea0f63feb36087b12ee8c"} Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.674671 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86c87df95f-24v9b" event={"ID":"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f","Type":"ContainerStarted","Data":"6dc6e03e55fc2e7a6450c6d71c9edb4eca8bb792de82fe1af6ce316aa64c21cf"} Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.679678 4842 generic.go:334] "Generic (PLEG): container finished" podID="36376ecc-1296-4538-ad80-f7777f121c13" containerID="9153b15b81840b8acd3ed79980ab186d0dc2cba1e728912f403516274a65d6c9" exitCode=0 Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.679759 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-898466c99-fxsnm" event={"ID":"36376ecc-1296-4538-ad80-f7777f121c13","Type":"ContainerDied","Data":"9153b15b81840b8acd3ed79980ab186d0dc2cba1e728912f403516274a65d6c9"} Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.682617 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bfmrc" event={"ID":"3b2102e4-d7be-4aa6-8ca4-105bcba66248","Type":"ContainerStarted","Data":"3eff7858cd7523f0719e1231457101f0d7d42f75444d45843e134aee0e3b38b9"} Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.690951 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nqmw2" event={"ID":"8a855b49-006b-47a5-a808-c1c3649473aa","Type":"ContainerStarted","Data":"b4b4126e6cb8ac477836d0346338eb3a71da3749a0f1e3686e60acb2c657d081"} Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.741545 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.744972 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-bfmrc" podStartSLOduration=3.74495104 podStartE2EDuration="3.74495104s" podCreationTimestamp="2025-11-11 13:57:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:19.741794291 +0000 UTC m=+1650.402083910" watchObservedRunningTime="2025-11-11 13:57:19.74495104 +0000 UTC m=+1650.405240659" Nov 11 13:57:19 crc kubenswrapper[4842]: I1111 13:57:19.813116 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.184939 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.309520 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-nb\") pod \"36376ecc-1296-4538-ad80-f7777f121c13\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.309575 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dr7s\" (UniqueName: \"kubernetes.io/projected/36376ecc-1296-4538-ad80-f7777f121c13-kube-api-access-7dr7s\") pod \"36376ecc-1296-4538-ad80-f7777f121c13\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.309621 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-sb\") pod \"36376ecc-1296-4538-ad80-f7777f121c13\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.309732 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-config\") pod \"36376ecc-1296-4538-ad80-f7777f121c13\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.309770 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-svc\") pod \"36376ecc-1296-4538-ad80-f7777f121c13\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.309820 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-swift-storage-0\") pod \"36376ecc-1296-4538-ad80-f7777f121c13\" (UID: \"36376ecc-1296-4538-ad80-f7777f121c13\") " Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.336411 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "36376ecc-1296-4538-ad80-f7777f121c13" (UID: "36376ecc-1296-4538-ad80-f7777f121c13"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.342177 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "36376ecc-1296-4538-ad80-f7777f121c13" (UID: "36376ecc-1296-4538-ad80-f7777f121c13"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.342253 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36376ecc-1296-4538-ad80-f7777f121c13-kube-api-access-7dr7s" (OuterVolumeSpecName: "kube-api-access-7dr7s") pod "36376ecc-1296-4538-ad80-f7777f121c13" (UID: "36376ecc-1296-4538-ad80-f7777f121c13"). InnerVolumeSpecName "kube-api-access-7dr7s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.343295 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "36376ecc-1296-4538-ad80-f7777f121c13" (UID: "36376ecc-1296-4538-ad80-f7777f121c13"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.348627 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "36376ecc-1296-4538-ad80-f7777f121c13" (UID: "36376ecc-1296-4538-ad80-f7777f121c13"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.355527 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-config" (OuterVolumeSpecName: "config") pod "36376ecc-1296-4538-ad80-f7777f121c13" (UID: "36376ecc-1296-4538-ad80-f7777f121c13"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.415311 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.415348 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.415361 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.415377 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.415390 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dr7s\" (UniqueName: \"kubernetes.io/projected/36376ecc-1296-4538-ad80-f7777f121c13-kube-api-access-7dr7s\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.415399 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/36376ecc-1296-4538-ad80-f7777f121c13-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.563206 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c86bb574c-djp9j"] Nov 11 13:57:20 crc kubenswrapper[4842]: W1111 13:57:20.579233 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9691e8b_e9db_41cf_b455_a3a9219b2d56.slice/crio-69a290d62c4c86d4749dcb595969ff01b1729fe7aa47e80a4c461f14a7c34ae2 WatchSource:0}: Error finding container 69a290d62c4c86d4749dcb595969ff01b1729fe7aa47e80a4c461f14a7c34ae2: Status 404 returned error can't find 
the container with id 69a290d62c4c86d4749dcb595969ff01b1729fe7aa47e80a4c461f14a7c34ae2 Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.709860 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" event={"ID":"d47bc4a2-0636-4a23-b904-4ca118e84e05","Type":"ContainerStarted","Data":"486feb2a68cc8d9e4e864555f55fd3c9a846bb6700c5860e4f0c98309535e8e1"} Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.711176 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.713930 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c86bb574c-djp9j" event={"ID":"b9691e8b-e9db-41cf-b455-a3a9219b2d56","Type":"ContainerStarted","Data":"69a290d62c4c86d4749dcb595969ff01b1729fe7aa47e80a4c461f14a7c34ae2"} Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.717142 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-898466c99-fxsnm" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.717400 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-898466c99-fxsnm" event={"ID":"36376ecc-1296-4538-ad80-f7777f121c13","Type":"ContainerDied","Data":"7771111df243c731819c4315c5dd7d3d5cc462647de3631c6abb3bcba6827350"} Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.717453 4842 scope.go:117] "RemoveContainer" containerID="9153b15b81840b8acd3ed79980ab186d0dc2cba1e728912f403516274a65d6c9" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.746350 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" podStartSLOduration=3.746328467 podStartE2EDuration="3.746328467s" podCreationTimestamp="2025-11-11 13:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:20.731272018 +0000 UTC m=+1651.391561637" watchObservedRunningTime="2025-11-11 13:57:20.746328467 +0000 UTC m=+1651.406618086" Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.790821 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-898466c99-fxsnm"] Nov 11 13:57:20 crc kubenswrapper[4842]: I1111 13:57:20.797814 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-898466c99-fxsnm"] Nov 11 13:57:21 crc kubenswrapper[4842]: I1111 13:57:21.744358 4842 generic.go:334] "Generic (PLEG): container finished" podID="b467d4be-8e0e-43a9-b2a7-51cc22c11e25" containerID="1d1c9b5432ee57faf6c2ff49e711da754f1e2505992c9f6c8a6eeb6a03d4771d" exitCode=0 Nov 11 13:57:21 crc kubenswrapper[4842]: I1111 13:57:21.744433 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-lcspd" event={"ID":"b467d4be-8e0e-43a9-b2a7-51cc22c11e25","Type":"ContainerDied","Data":"1d1c9b5432ee57faf6c2ff49e711da754f1e2505992c9f6c8a6eeb6a03d4771d"} Nov 11 13:57:22 crc kubenswrapper[4842]: I1111 13:57:22.080819 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36376ecc-1296-4538-ad80-f7777f121c13" path="/var/lib/kubelet/pods/36376ecc-1296-4538-ad80-f7777f121c13/volumes" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.740095 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-798f-account-create-j4bvf"] Nov 11 13:57:23 crc kubenswrapper[4842]: E1111 13:57:23.740968 4842 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="36376ecc-1296-4538-ad80-f7777f121c13" containerName="init" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.740985 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="36376ecc-1296-4538-ad80-f7777f121c13" containerName="init" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.741228 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="36376ecc-1296-4538-ad80-f7777f121c13" containerName="init" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.741922 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-798f-account-create-j4bvf" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.744507 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.759222 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-798f-account-create-j4bvf"] Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.793734 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-777jm\" (UniqueName: \"kubernetes.io/projected/5d1e700d-ece9-4398-9bfe-d36b8fe07607-kube-api-access-777jm\") pod \"barbican-798f-account-create-j4bvf\" (UID: \"5d1e700d-ece9-4398-9bfe-d36b8fe07607\") " pod="openstack/barbican-798f-account-create-j4bvf" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.846891 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-r24nh"] Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.851050 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.858081 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-slnb6" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.858420 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.858570 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.865711 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-r24nh"] Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.896072 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-config-data\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.896149 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-etc-machine-id\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.896421 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-db-sync-config-data\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " 
pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.896497 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ph7j\" (UniqueName: \"kubernetes.io/projected/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-kube-api-access-2ph7j\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.896615 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-combined-ca-bundle\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.896766 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-777jm\" (UniqueName: \"kubernetes.io/projected/5d1e700d-ece9-4398-9bfe-d36b8fe07607-kube-api-access-777jm\") pod \"barbican-798f-account-create-j4bvf\" (UID: \"5d1e700d-ece9-4398-9bfe-d36b8fe07607\") " pod="openstack/barbican-798f-account-create-j4bvf" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.896838 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-scripts\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.918530 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-777jm\" (UniqueName: \"kubernetes.io/projected/5d1e700d-ece9-4398-9bfe-d36b8fe07607-kube-api-access-777jm\") pod \"barbican-798f-account-create-j4bvf\" (UID: \"5d1e700d-ece9-4398-9bfe-d36b8fe07607\") " pod="openstack/barbican-798f-account-create-j4bvf" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.998316 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-scripts\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.998765 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-config-data\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.998798 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-etc-machine-id\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.998853 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-etc-machine-id\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 
13:57:23.998859 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-db-sync-config-data\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.998923 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ph7j\" (UniqueName: \"kubernetes.io/projected/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-kube-api-access-2ph7j\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:23 crc kubenswrapper[4842]: I1111 13:57:23.998959 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-combined-ca-bundle\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.004978 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-scripts\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.006195 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-db-sync-config-data\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.006216 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-combined-ca-bundle\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.007623 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-config-data\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.017557 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ph7j\" (UniqueName: \"kubernetes.io/projected/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-kube-api-access-2ph7j\") pod \"cinder-db-sync-r24nh\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.080566 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-798f-account-create-j4bvf" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.192671 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-r24nh" Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.838742 4842 generic.go:334] "Generic (PLEG): container finished" podID="3b2102e4-d7be-4aa6-8ca4-105bcba66248" containerID="3eff7858cd7523f0719e1231457101f0d7d42f75444d45843e134aee0e3b38b9" exitCode=0 Nov 11 13:57:24 crc kubenswrapper[4842]: I1111 13:57:24.838792 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bfmrc" event={"ID":"3b2102e4-d7be-4aa6-8ca4-105bcba66248","Type":"ContainerDied","Data":"3eff7858cd7523f0719e1231457101f0d7d42f75444d45843e134aee0e3b38b9"} Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.130751 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-86c87df95f-24v9b"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.136276 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6dfb9d8bf8-tjb9d"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.140159 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.153871 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.171055 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-670a-account-create-dtqhk"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.178043 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-670a-account-create-dtqhk" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.186759 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dfb9d8bf8-tjb9d"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.195231 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.203534 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-670a-account-create-dtqhk"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.226710 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c86bb574c-djp9j"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.241378 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcrzp\" (UniqueName: \"kubernetes.io/projected/a47141a6-6da7-4e16-b6c5-299f5709caa6-kube-api-access-qcrzp\") pod \"glance-670a-account-create-dtqhk\" (UID: \"a47141a6-6da7-4e16-b6c5-299f5709caa6\") " pod="openstack/glance-670a-account-create-dtqhk" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.241460 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-tls-certs\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.241503 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-scripts\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 
13:57:26.241578 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-combined-ca-bundle\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.241606 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-config-data\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.241631 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-secret-key\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.241687 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-logs\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.241852 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jwf7\" (UniqueName: \"kubernetes.io/projected/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-kube-api-access-4jwf7\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.252444 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7ccc6f5856-tt6gw"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.254587 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.264886 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-32ac-account-create-dtmwr"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.267382 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-32ac-account-create-dtmwr" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.272772 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.278348 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7ccc6f5856-tt6gw"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.290543 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-32ac-account-create-dtmwr"] Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343048 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jwf7\" (UniqueName: \"kubernetes.io/projected/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-kube-api-access-4jwf7\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343120 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-combined-ca-bundle\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343154 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcrzp\" (UniqueName: \"kubernetes.io/projected/a47141a6-6da7-4e16-b6c5-299f5709caa6-kube-api-access-qcrzp\") pod \"glance-670a-account-create-dtqhk\" (UID: \"a47141a6-6da7-4e16-b6c5-299f5709caa6\") " pod="openstack/glance-670a-account-create-dtqhk" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343177 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-tls-certs\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343210 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-scripts\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343263 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dde43d8b-9a6f-4506-9285-0606a6e04361-config-data\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343309 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-combined-ca-bundle\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343334 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/dde43d8b-9a6f-4506-9285-0606a6e04361-scripts\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343358 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-config-data\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343376 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-secret-key\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343509 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-logs\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343575 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-horizon-tls-certs\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343625 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dde43d8b-9a6f-4506-9285-0606a6e04361-logs\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343644 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-horizon-secret-key\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343865 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rm9sz\" (UniqueName: \"kubernetes.io/projected/1fcd2f4b-fd39-425d-a3c8-382a5020d38b-kube-api-access-rm9sz\") pod \"neutron-32ac-account-create-dtmwr\" (UID: \"1fcd2f4b-fd39-425d-a3c8-382a5020d38b\") " pod="openstack/neutron-32ac-account-create-dtmwr" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.343906 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4686t\" (UniqueName: \"kubernetes.io/projected/dde43d8b-9a6f-4506-9285-0606a6e04361-kube-api-access-4686t\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.344091 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-logs\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.344836 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-config-data\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.346607 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-scripts\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.350789 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-combined-ca-bundle\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.354813 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-tls-certs\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.357606 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-secret-key\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.365913 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jwf7\" (UniqueName: \"kubernetes.io/projected/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-kube-api-access-4jwf7\") pod \"horizon-6dfb9d8bf8-tjb9d\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.371820 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcrzp\" (UniqueName: \"kubernetes.io/projected/a47141a6-6da7-4e16-b6c5-299f5709caa6-kube-api-access-qcrzp\") pod \"glance-670a-account-create-dtqhk\" (UID: \"a47141a6-6da7-4e16-b6c5-299f5709caa6\") " pod="openstack/glance-670a-account-create-dtqhk" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445637 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-horizon-tls-certs\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445709 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dde43d8b-9a6f-4506-9285-0606a6e04361-logs\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " 
pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445732 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-horizon-secret-key\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445822 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rm9sz\" (UniqueName: \"kubernetes.io/projected/1fcd2f4b-fd39-425d-a3c8-382a5020d38b-kube-api-access-rm9sz\") pod \"neutron-32ac-account-create-dtmwr\" (UID: \"1fcd2f4b-fd39-425d-a3c8-382a5020d38b\") " pod="openstack/neutron-32ac-account-create-dtmwr" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445853 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4686t\" (UniqueName: \"kubernetes.io/projected/dde43d8b-9a6f-4506-9285-0606a6e04361-kube-api-access-4686t\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445896 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-combined-ca-bundle\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445965 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dde43d8b-9a6f-4506-9285-0606a6e04361-config-data\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.445993 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dde43d8b-9a6f-4506-9285-0606a6e04361-scripts\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.446246 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dde43d8b-9a6f-4506-9285-0606a6e04361-logs\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.446997 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dde43d8b-9a6f-4506-9285-0606a6e04361-scripts\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.447566 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dde43d8b-9a6f-4506-9285-0606a6e04361-config-data\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.460607 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-horizon-tls-certs\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.461960 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-combined-ca-bundle\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.462937 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dde43d8b-9a6f-4506-9285-0606a6e04361-horizon-secret-key\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.463744 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4686t\" (UniqueName: \"kubernetes.io/projected/dde43d8b-9a6f-4506-9285-0606a6e04361-kube-api-access-4686t\") pod \"horizon-7ccc6f5856-tt6gw\" (UID: \"dde43d8b-9a6f-4506-9285-0606a6e04361\") " pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.464244 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rm9sz\" (UniqueName: \"kubernetes.io/projected/1fcd2f4b-fd39-425d-a3c8-382a5020d38b-kube-api-access-rm9sz\") pod \"neutron-32ac-account-create-dtmwr\" (UID: \"1fcd2f4b-fd39-425d-a3c8-382a5020d38b\") " pod="openstack/neutron-32ac-account-create-dtmwr" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.495331 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.538022 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-670a-account-create-dtqhk" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.584858 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:26 crc kubenswrapper[4842]: I1111 13:57:26.593719 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-32ac-account-create-dtmwr" Nov 11 13:57:27 crc kubenswrapper[4842]: I1111 13:57:27.644326 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:57:27 crc kubenswrapper[4842]: I1111 13:57:27.746461 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8447565c-vnzdx"] Nov 11 13:57:27 crc kubenswrapper[4842]: I1111 13:57:27.748330 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8447565c-vnzdx" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="dnsmasq-dns" containerID="cri-o://7a4f4b51854c2c8394054164cf43ea1aae702f769713b59232dc15735f0ac776" gracePeriod=10 Nov 11 13:57:28 crc kubenswrapper[4842]: I1111 13:57:28.888784 4842 generic.go:334] "Generic (PLEG): container finished" podID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerID="7a4f4b51854c2c8394054164cf43ea1aae702f769713b59232dc15735f0ac776" exitCode=0 Nov 11 13:57:28 crc kubenswrapper[4842]: I1111 13:57:28.888858 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447565c-vnzdx" event={"ID":"fc8b995c-8577-436c-8341-2cc8e9094a10","Type":"ContainerDied","Data":"7a4f4b51854c2c8394054164cf43ea1aae702f769713b59232dc15735f0ac776"} Nov 11 13:57:28 crc kubenswrapper[4842]: I1111 13:57:28.970574 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8447565c-vnzdx" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: connect: connection refused" Nov 11 13:57:29 crc kubenswrapper[4842]: I1111 13:57:29.740250 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 11 13:57:29 crc kubenswrapper[4842]: I1111 13:57:29.745669 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 11 13:57:29 crc kubenswrapper[4842]: I1111 13:57:29.900930 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 11 13:57:32 crc kubenswrapper[4842]: E1111 13:57:32.214148 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Nov 11 13:57:32 crc kubenswrapper[4842]: E1111 13:57:32.214815 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest" Nov 11 13:57:32 crc kubenswrapper[4842]: E1111 13:57:32.215038 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:38.102.83.132:5001/podified-master-centos10/openstack-ceilometer-central:watcher_latest,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n97h98h558h559h4hf7h676hc5h5bh5ffh6ch669h76h58dh5f4h59bh554h64h5d8h68h6bh5cdh677h5b4h665h65dh569h575h97h55h88hcdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xp976,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(d8908777-1ed6-42fa-8642-5c388d9f0b4e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.425027 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.574610 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9dnt\" (UniqueName: \"kubernetes.io/projected/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-kube-api-access-r9dnt\") pod \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.574661 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-db-sync-config-data\") pod \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.574739 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-config-data\") pod \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.574800 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-combined-ca-bundle\") pod \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\" (UID: \"b467d4be-8e0e-43a9-b2a7-51cc22c11e25\") " Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.593152 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-kube-api-access-r9dnt" (OuterVolumeSpecName: "kube-api-access-r9dnt") pod "b467d4be-8e0e-43a9-b2a7-51cc22c11e25" (UID: "b467d4be-8e0e-43a9-b2a7-51cc22c11e25"). InnerVolumeSpecName "kube-api-access-r9dnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.616387 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b467d4be-8e0e-43a9-b2a7-51cc22c11e25" (UID: "b467d4be-8e0e-43a9-b2a7-51cc22c11e25"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.624226 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b467d4be-8e0e-43a9-b2a7-51cc22c11e25" (UID: "b467d4be-8e0e-43a9-b2a7-51cc22c11e25"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.651671 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-config-data" (OuterVolumeSpecName: "config-data") pod "b467d4be-8e0e-43a9-b2a7-51cc22c11e25" (UID: "b467d4be-8e0e-43a9-b2a7-51cc22c11e25"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.677791 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9dnt\" (UniqueName: \"kubernetes.io/projected/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-kube-api-access-r9dnt\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.677841 4842 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.677853 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.677863 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b467d4be-8e0e-43a9-b2a7-51cc22c11e25-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.926905 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-lcspd" event={"ID":"b467d4be-8e0e-43a9-b2a7-51cc22c11e25","Type":"ContainerDied","Data":"ca92e43cb5243e0a2e1226385fd496971df30437b5785b1f6286caf87d190254"} Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.926941 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca92e43cb5243e0a2e1226385fd496971df30437b5785b1f6286caf87d190254" Nov 11 13:57:32 crc kubenswrapper[4842]: I1111 13:57:32.926973 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-lcspd" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.677763 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:33 crc kubenswrapper[4842]: E1111 13:57:33.678575 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b467d4be-8e0e-43a9-b2a7-51cc22c11e25" containerName="watcher-db-sync" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.678601 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b467d4be-8e0e-43a9-b2a7-51cc22c11e25" containerName="watcher-db-sync" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.678934 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b467d4be-8e0e-43a9-b2a7-51cc22c11e25" containerName="watcher-db-sync" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.682170 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.687176 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.688411 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.694429 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-ctgrf" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.762424 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.763910 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.767709 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.775810 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.799077 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.799431 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/500b7ba2-c2f9-4928-97cd-3b3d234625bc-logs\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.799480 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-config-data\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.799518 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59l8j\" (UniqueName: \"kubernetes.io/projected/500b7ba2-c2f9-4928-97cd-3b3d234625bc-kube-api-access-59l8j\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.799575 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.812595 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.814216 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.816529 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.823921 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.903638 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-config-data\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.903730 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59l8j\" (UniqueName: \"kubernetes.io/projected/500b7ba2-c2f9-4928-97cd-3b3d234625bc-kube-api-access-59l8j\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.903814 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.903850 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f38d3ba-5c82-4503-a865-35767c1f1147-config-data\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.903938 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.903973 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904002 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f38d3ba-5c82-4503-a865-35767c1f1147-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904206 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glvx2\" (UniqueName: \"kubernetes.io/projected/9f38d3ba-5c82-4503-a865-35767c1f1147-kube-api-access-glvx2\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:33 crc 
kubenswrapper[4842]: I1111 13:57:33.904270 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904303 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f38d3ba-5c82-4503-a865-35767c1f1147-logs\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904352 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/500b7ba2-c2f9-4928-97cd-3b3d234625bc-logs\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904443 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-logs\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904493 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqlgg\" (UniqueName: \"kubernetes.io/projected/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-kube-api-access-zqlgg\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904526 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-config-data\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.904975 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/500b7ba2-c2f9-4928-97cd-3b3d234625bc-logs\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.909878 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-config-data\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.916035 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.919768 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-combined-ca-bundle\") pod \"watcher-api-0\" (UID: 
\"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.922654 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59l8j\" (UniqueName: \"kubernetes.io/projected/500b7ba2-c2f9-4928-97cd-3b3d234625bc-kube-api-access-59l8j\") pod \"watcher-api-0\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " pod="openstack/watcher-api-0" Nov 11 13:57:33 crc kubenswrapper[4842]: I1111 13:57:33.999313 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.005975 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqlgg\" (UniqueName: \"kubernetes.io/projected/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-kube-api-access-zqlgg\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006513 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-config-data\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006589 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006637 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f38d3ba-5c82-4503-a865-35767c1f1147-config-data\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006719 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006775 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f38d3ba-5c82-4503-a865-35767c1f1147-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006873 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glvx2\" (UniqueName: \"kubernetes.io/projected/9f38d3ba-5c82-4503-a865-35767c1f1147-kube-api-access-glvx2\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006908 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f38d3ba-5c82-4503-a865-35767c1f1147-logs\") pod \"watcher-applier-0\" (UID: 
\"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.006992 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-logs\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.007775 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-logs\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.010806 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-config-data\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.011479 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f38d3ba-5c82-4503-a865-35767c1f1147-logs\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.012491 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f38d3ba-5c82-4503-a865-35767c1f1147-config-data\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.012812 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f38d3ba-5c82-4503-a865-35767c1f1147-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.015976 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.027043 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.029257 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqlgg\" (UniqueName: \"kubernetes.io/projected/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-kube-api-access-zqlgg\") pod \"watcher-decision-engine-0\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.031881 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glvx2\" (UniqueName: 
\"kubernetes.io/projected/9f38d3ba-5c82-4503-a865-35767c1f1147-kube-api-access-glvx2\") pod \"watcher-applier-0\" (UID: \"9f38d3ba-5c82-4503-a865-35767c1f1147\") " pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.079635 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.129984 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 11 13:57:34 crc kubenswrapper[4842]: E1111 13:57:34.256834 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Nov 11 13:57:34 crc kubenswrapper[4842]: E1111 13:57:34.256909 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-placement-api:watcher_latest" Nov 11 13:57:34 crc kubenswrapper[4842]: E1111 13:57:34.257067 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:38.102.83.132:5001/podified-master-centos10/openstack-placement-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kbb6n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-nqmw2_openstack(8a855b49-006b-47a5-a808-c1c3649473aa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:57:34 
crc kubenswrapper[4842]: E1111 13:57:34.258610 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-nqmw2" podUID="8a855b49-006b-47a5-a808-c1c3649473aa" Nov 11 13:57:34 crc kubenswrapper[4842]: E1111 13:57:34.272443 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 11 13:57:34 crc kubenswrapper[4842]: E1111 13:57:34.272524 4842 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-horizon:watcher_latest" Nov 11 13:57:34 crc kubenswrapper[4842]: E1111 13:57:34.272984 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:38.102.83.132:5001/podified-master-centos10/openstack-horizon:watcher_latest,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n66fh549h666h58bh6ch5fbh646h5f4h5d7h8h86h55fh97h55h65ch664h644hb8hbdh65ch66h666h5c5h6h56h5bbh669h576h578h55h5dh574q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4m5t4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-86c87df95f-24v9b_openstack(4c8ef126-60a0-4a3d-8d1f-8207a8fa684f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:57:34 crc kubenswrapper[4842]: E1111 13:57:34.288702 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image 
\\\"38.102.83.132:5001/podified-master-centos10/openstack-horizon:watcher_latest\\\"\"]" pod="openstack/horizon-86c87df95f-24v9b" podUID="4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.323745 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.334927 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415370 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-credential-keys\") pod \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415468 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-scripts\") pod \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415511 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-nb\") pod \"fc8b995c-8577-436c-8341-2cc8e9094a10\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415541 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-684jh\" (UniqueName: \"kubernetes.io/projected/3b2102e4-d7be-4aa6-8ca4-105bcba66248-kube-api-access-684jh\") pod \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415561 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-swift-storage-0\") pod \"fc8b995c-8577-436c-8341-2cc8e9094a10\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415581 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2cv8\" (UniqueName: \"kubernetes.io/projected/fc8b995c-8577-436c-8341-2cc8e9094a10-kube-api-access-r2cv8\") pod \"fc8b995c-8577-436c-8341-2cc8e9094a10\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415623 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-config-data\") pod \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415681 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-fernet-keys\") pod \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415702 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-sb\") pod \"fc8b995c-8577-436c-8341-2cc8e9094a10\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415731 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-config\") pod \"fc8b995c-8577-436c-8341-2cc8e9094a10\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415762 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-svc\") pod \"fc8b995c-8577-436c-8341-2cc8e9094a10\" (UID: \"fc8b995c-8577-436c-8341-2cc8e9094a10\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.415807 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-combined-ca-bundle\") pod \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\" (UID: \"3b2102e4-d7be-4aa6-8ca4-105bcba66248\") " Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.420268 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc8b995c-8577-436c-8341-2cc8e9094a10-kube-api-access-r2cv8" (OuterVolumeSpecName: "kube-api-access-r2cv8") pod "fc8b995c-8577-436c-8341-2cc8e9094a10" (UID: "fc8b995c-8577-436c-8341-2cc8e9094a10"). InnerVolumeSpecName "kube-api-access-r2cv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.423828 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b2102e4-d7be-4aa6-8ca4-105bcba66248-kube-api-access-684jh" (OuterVolumeSpecName: "kube-api-access-684jh") pod "3b2102e4-d7be-4aa6-8ca4-105bcba66248" (UID: "3b2102e4-d7be-4aa6-8ca4-105bcba66248"). InnerVolumeSpecName "kube-api-access-684jh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.424554 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3b2102e4-d7be-4aa6-8ca4-105bcba66248" (UID: "3b2102e4-d7be-4aa6-8ca4-105bcba66248"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.430424 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-scripts" (OuterVolumeSpecName: "scripts") pod "3b2102e4-d7be-4aa6-8ca4-105bcba66248" (UID: "3b2102e4-d7be-4aa6-8ca4-105bcba66248"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.442127 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3b2102e4-d7be-4aa6-8ca4-105bcba66248" (UID: "3b2102e4-d7be-4aa6-8ca4-105bcba66248"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.477451 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-config-data" (OuterVolumeSpecName: "config-data") pod "3b2102e4-d7be-4aa6-8ca4-105bcba66248" (UID: "3b2102e4-d7be-4aa6-8ca4-105bcba66248"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.492911 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3b2102e4-d7be-4aa6-8ca4-105bcba66248" (UID: "3b2102e4-d7be-4aa6-8ca4-105bcba66248"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.526479 4842 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.526511 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.526522 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-684jh\" (UniqueName: \"kubernetes.io/projected/3b2102e4-d7be-4aa6-8ca4-105bcba66248-kube-api-access-684jh\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.526532 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2cv8\" (UniqueName: \"kubernetes.io/projected/fc8b995c-8577-436c-8341-2cc8e9094a10-kube-api-access-r2cv8\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.526541 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.526549 4842 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.526557 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b2102e4-d7be-4aa6-8ca4-105bcba66248-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.531053 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fc8b995c-8577-436c-8341-2cc8e9094a10" (UID: "fc8b995c-8577-436c-8341-2cc8e9094a10"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.536777 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "fc8b995c-8577-436c-8341-2cc8e9094a10" (UID: "fc8b995c-8577-436c-8341-2cc8e9094a10"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.544149 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fc8b995c-8577-436c-8341-2cc8e9094a10" (UID: "fc8b995c-8577-436c-8341-2cc8e9094a10"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.567528 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-config" (OuterVolumeSpecName: "config") pod "fc8b995c-8577-436c-8341-2cc8e9094a10" (UID: "fc8b995c-8577-436c-8341-2cc8e9094a10"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.579813 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fc8b995c-8577-436c-8341-2cc8e9094a10" (UID: "fc8b995c-8577-436c-8341-2cc8e9094a10"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.628611 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.628642 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.628654 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.628664 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.628675 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8b995c-8577-436c-8341-2cc8e9094a10-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.966982 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bfmrc" event={"ID":"3b2102e4-d7be-4aa6-8ca4-105bcba66248","Type":"ContainerDied","Data":"3448c11620ab504af5ea8d4107da4870017b08722bc3e645ba10bdd5de0a31c7"} Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.967261 4842 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="3448c11620ab504af5ea8d4107da4870017b08722bc3e645ba10bdd5de0a31c7" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.967334 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bfmrc" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.983953 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8447565c-vnzdx" event={"ID":"fc8b995c-8577-436c-8341-2cc8e9094a10","Type":"ContainerDied","Data":"3ccb40a85241aa495253d6d58ee32bb0256be3f4a05441fafb7f863c38af2c0d"} Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.983997 4842 scope.go:117] "RemoveContainer" containerID="7a4f4b51854c2c8394054164cf43ea1aae702f769713b59232dc15735f0ac776" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.984201 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8447565c-vnzdx" Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.986049 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-r24nh"] Nov 11 13:57:34 crc kubenswrapper[4842]: I1111 13:57:34.999042 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4d859fc9-zzhzr" event={"ID":"45a26782-560c-4aa9-b5cc-301abaea98ee","Type":"ContainerStarted","Data":"3c7f7187d95201e58f7bc3338b45c3d5c3eb2547c4f721f2da0b227915a20661"} Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.001577 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c86bb574c-djp9j" event={"ID":"b9691e8b-e9db-41cf-b455-a3a9219b2d56","Type":"ContainerStarted","Data":"a3350effa1849593c188722ef00592ac4f29bebef372ad1d28676bda87287c14"} Nov 11 13:57:35 crc kubenswrapper[4842]: E1111 13:57:35.007472 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/podified-master-centos10/openstack-placement-api:watcher_latest\\\"\"" pod="openstack/placement-db-sync-nqmw2" podUID="8a855b49-006b-47a5-a808-c1c3649473aa" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.099866 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8447565c-vnzdx"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.112277 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8447565c-vnzdx"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.483658 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-bfmrc"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.491540 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-bfmrc"] Nov 11 13:57:35 crc kubenswrapper[4842]: W1111 13:57:35.492721 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69d5134b_7c5b_40d9_bcbd_a1bd368a358d.slice/crio-038a90b070bd062612f2ea3c95c944e00423d90341982b1fa7a242511d7e2a3b WatchSource:0}: Error finding container 038a90b070bd062612f2ea3c95c944e00423d90341982b1fa7a242511d7e2a3b: Status 404 returned error can't find the container with id 038a90b070bd062612f2ea3c95c944e00423d90341982b1fa7a242511d7e2a3b Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.503732 4842 scope.go:117] "RemoveContainer" containerID="f5f3b1e3a7ec344316a61312deae227ba34e05a1769cde998ee13a09a2b848ac" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 
13:57:35.601160 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-547zg"] Nov 11 13:57:35 crc kubenswrapper[4842]: E1111 13:57:35.601598 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="dnsmasq-dns" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.601614 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="dnsmasq-dns" Nov 11 13:57:35 crc kubenswrapper[4842]: E1111 13:57:35.601628 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="init" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.601634 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="init" Nov 11 13:57:35 crc kubenswrapper[4842]: E1111 13:57:35.601645 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b2102e4-d7be-4aa6-8ca4-105bcba66248" containerName="keystone-bootstrap" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.601651 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b2102e4-d7be-4aa6-8ca4-105bcba66248" containerName="keystone-bootstrap" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.601824 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b2102e4-d7be-4aa6-8ca4-105bcba66248" containerName="keystone-bootstrap" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.601846 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="dnsmasq-dns" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.602490 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.608057 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.608827 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-547zg"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.609231 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.609498 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rxh4x" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.612281 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.680344 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-798f-account-create-j4bvf"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.707587 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-670a-account-create-dtqhk"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.722381 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7ccc6f5856-tt6gw"] Nov 11 13:57:35 crc kubenswrapper[4842]: W1111 13:57:35.735950 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d1e700d_ece9_4398_9bfe_d36b8fe07607.slice/crio-5b30177ced352a1c5811a976366107a55a891bae3533ab9c67a51a04e7033f47 WatchSource:0}: Error finding container 5b30177ced352a1c5811a976366107a55a891bae3533ab9c67a51a04e7033f47: Status 404 returned error can't find the container with id 5b30177ced352a1c5811a976366107a55a891bae3533ab9c67a51a04e7033f47 Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.757232 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.761694 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2scqw\" (UniqueName: \"kubernetes.io/projected/48dceb55-6e5e-400b-a9fa-026a0c06bd41-kube-api-access-2scqw\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.761738 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-scripts\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.761767 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-fernet-keys\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.761805 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-config-data\") pod 
\"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.761823 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-credential-keys\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.761849 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-combined-ca-bundle\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.771320 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dfb9d8bf8-tjb9d"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.779957 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.855126 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.864474 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2scqw\" (UniqueName: \"kubernetes.io/projected/48dceb55-6e5e-400b-a9fa-026a0c06bd41-kube-api-access-2scqw\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.864554 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-scripts\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.864601 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-fernet-keys\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.864662 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-config-data\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.864684 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-credential-keys\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.864710 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-combined-ca-bundle\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.876413 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-combined-ca-bundle\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.876928 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-scripts\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.881553 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-config-data\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.881757 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-credential-keys\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.899617 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2scqw\" (UniqueName: \"kubernetes.io/projected/48dceb55-6e5e-400b-a9fa-026a0c06bd41-kube-api-access-2scqw\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.905381 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-fernet-keys\") pod \"keystone-bootstrap-547zg\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.962842 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-32ac-account-create-dtmwr"] Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.981304 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.982992 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-logs\") pod \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.983167 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m5t4\" (UniqueName: \"kubernetes.io/projected/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-kube-api-access-4m5t4\") pod \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.983273 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-config-data\") pod \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.983327 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-horizon-secret-key\") pod \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.984573 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-scripts\") pod \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\" (UID: \"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f\") " Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.984789 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-logs" (OuterVolumeSpecName: "logs") pod "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" (UID: "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.985440 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-config-data" (OuterVolumeSpecName: "config-data") pod "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" (UID: "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.988399 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-scripts" (OuterVolumeSpecName: "scripts") pod "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" (UID: "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.989442 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.990462 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.990548 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.995894 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-kube-api-access-4m5t4" (OuterVolumeSpecName: "kube-api-access-4m5t4") pod "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" (UID: "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f"). InnerVolumeSpecName "kube-api-access-4m5t4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:35 crc kubenswrapper[4842]: I1111 13:57:35.997358 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" (UID: "4c8ef126-60a0-4a3d-8d1f-8207a8fa684f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.003109 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.035433 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4d859fc9-zzhzr" event={"ID":"45a26782-560c-4aa9-b5cc-301abaea98ee","Type":"ContainerStarted","Data":"b4ab738ee51ab22fb123e94ee08d898782bb15abe34cf3d94cf40c0fd32a4ff5"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.035562 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7f4d859fc9-zzhzr" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon-log" containerID="cri-o://3c7f7187d95201e58f7bc3338b45c3d5c3eb2547c4f721f2da0b227915a20661" gracePeriod=30 Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.035684 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7f4d859fc9-zzhzr" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon" containerID="cri-o://b4ab738ee51ab22fb123e94ee08d898782bb15abe34cf3d94cf40c0fd32a4ff5" gracePeriod=30 Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.042783 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86c87df95f-24v9b" event={"ID":"4c8ef126-60a0-4a3d-8d1f-8207a8fa684f","Type":"ContainerDied","Data":"6dc6e03e55fc2e7a6450c6d71c9edb4eca8bb792de82fe1af6ce316aa64c21cf"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.042915 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.047470 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-798f-account-create-j4bvf" event={"ID":"5d1e700d-ece9-4398-9bfe-d36b8fe07607","Type":"ContainerStarted","Data":"5b30177ced352a1c5811a976366107a55a891bae3533ab9c67a51a04e7033f47"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.054196 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7ccc6f5856-tt6gw" event={"ID":"dde43d8b-9a6f-4506-9285-0606a6e04361","Type":"ContainerStarted","Data":"b9c6d8e1f3f94e112210b27d8e31940d2948049f257e5393abac70b13ea68713"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.054243 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7ccc6f5856-tt6gw" event={"ID":"dde43d8b-9a6f-4506-9285-0606a6e04361","Type":"ContainerStarted","Data":"e1366e6835354dba3a1229351836d6f0ce46246e8db9d3e4236a0672dfffc50c"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.057059 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-32ac-account-create-dtmwr" event={"ID":"1fcd2f4b-fd39-425d-a3c8-382a5020d38b","Type":"ContainerStarted","Data":"76d99e0ce3a25cf6555d5e4c21dca7a191cbf66974f325e07dcfab63a91dc840"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.072237 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7f4d859fc9-zzhzr" podStartSLOduration=3.855193201 podStartE2EDuration="20.072221895s" podCreationTimestamp="2025-11-11 13:57:16 +0000 UTC" firstStartedPulling="2025-11-11 13:57:18.198445246 +0000 UTC m=+1648.858734865" lastFinishedPulling="2025-11-11 13:57:34.41547393 +0000 UTC m=+1665.075763559" observedRunningTime="2025-11-11 13:57:36.061144223 +0000 UTC m=+1666.721433862" watchObservedRunningTime="2025-11-11 13:57:36.072221895 +0000 UTC m=+1666.732511514" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.083734 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b2102e4-d7be-4aa6-8ca4-105bcba66248" path="/var/lib/kubelet/pods/3b2102e4-d7be-4aa6-8ca4-105bcba66248/volumes" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.084276 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" path="/var/lib/kubelet/pods/fc8b995c-8577-436c-8341-2cc8e9094a10/volumes" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.085022 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfb9d8bf8-tjb9d" event={"ID":"b3246e29-bb2d-46c3-8d7f-3dec2e353e41","Type":"ContainerStarted","Data":"e60d99a36a19aea64b06383a5052457cc757278a33a92f510a89fee64a8bbbf3"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.085044 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerStarted","Data":"5fada185db5847a3fb73ad8abb99ebfbc8ef0f35fc193c54109495d98a05c9a4"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.085055 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"9f38d3ba-5c82-4503-a865-35767c1f1147","Type":"ContainerStarted","Data":"4501ef081fa7aa28bb4cd8f8266dff98ee6788610958527514e0214467e4e227"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.085064 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" 
event={"ID":"500b7ba2-c2f9-4928-97cd-3b3d234625bc","Type":"ContainerStarted","Data":"0cd1e39ca91839694922b9914988c2bd46d274528e527d50fd043764240b4605"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.090744 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8908777-1ed6-42fa-8642-5c388d9f0b4e","Type":"ContainerStarted","Data":"2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.091619 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m5t4\" (UniqueName: \"kubernetes.io/projected/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-kube-api-access-4m5t4\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.091647 4842 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.096130 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r24nh" event={"ID":"69d5134b-7c5b-40d9-bcbd-a1bd368a358d","Type":"ContainerStarted","Data":"038a90b070bd062612f2ea3c95c944e00423d90341982b1fa7a242511d7e2a3b"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.112156 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c86bb574c-djp9j" event={"ID":"b9691e8b-e9db-41cf-b455-a3a9219b2d56","Type":"ContainerStarted","Data":"b8fe7eae886c463be84f2e51259f1e8ee851c5acf575e0c0961ffff66aebacf5"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.113430 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c86bb574c-djp9j" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon-log" containerID="cri-o://a3350effa1849593c188722ef00592ac4f29bebef372ad1d28676bda87287c14" gracePeriod=30 Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.114051 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c86bb574c-djp9j" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon" containerID="cri-o://b8fe7eae886c463be84f2e51259f1e8ee851c5acf575e0c0961ffff66aebacf5" gracePeriod=30 Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.118573 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-670a-account-create-dtqhk" event={"ID":"a47141a6-6da7-4e16-b6c5-299f5709caa6","Type":"ContainerStarted","Data":"0cb8ebb984cf99efb975405a204ce4bbfdb4ab2f96e1eb6a7a80cbd618882372"} Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.152583 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6c86bb574c-djp9j" podStartSLOduration=3.190430279 podStartE2EDuration="17.152560646s" podCreationTimestamp="2025-11-11 13:57:19 +0000 UTC" firstStartedPulling="2025-11-11 13:57:20.581189883 +0000 UTC m=+1651.241479492" lastFinishedPulling="2025-11-11 13:57:34.54332024 +0000 UTC m=+1665.203609859" observedRunningTime="2025-11-11 13:57:36.138145059 +0000 UTC m=+1666.798434678" watchObservedRunningTime="2025-11-11 13:57:36.152560646 +0000 UTC m=+1666.812850265" Nov 11 13:57:36 crc kubenswrapper[4842]: I1111 13:57:36.560547 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-547zg"] Nov 11 13:57:36 crc kubenswrapper[4842]: W1111 13:57:36.577824 4842 manager.go:1169] Failed to process watch event 
{EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod48dceb55_6e5e_400b_a9fa_026a0c06bd41.slice/crio-446f1552aab2828e41ed652d63bc01aa27dd1458646386984070f9f5b2edf2b0 WatchSource:0}: Error finding container 446f1552aab2828e41ed652d63bc01aa27dd1458646386984070f9f5b2edf2b0: Status 404 returned error can't find the container with id 446f1552aab2828e41ed652d63bc01aa27dd1458646386984070f9f5b2edf2b0 Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.161181 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7ccc6f5856-tt6gw" event={"ID":"dde43d8b-9a6f-4506-9285-0606a6e04361","Type":"ContainerStarted","Data":"fc4b68a5bd2fb1bdb2a1d0aaf43a746fe36494844f9987b3cf6f25e9e981fc8f"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.172752 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-547zg" event={"ID":"48dceb55-6e5e-400b-a9fa-026a0c06bd41","Type":"ContainerStarted","Data":"230ea59936243c04daedf5ca034427af9fa2f2ac7f4bc6d8a5abf11e59f059d1"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.172830 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-547zg" event={"ID":"48dceb55-6e5e-400b-a9fa-026a0c06bd41","Type":"ContainerStarted","Data":"446f1552aab2828e41ed652d63bc01aa27dd1458646386984070f9f5b2edf2b0"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.177310 4842 generic.go:334] "Generic (PLEG): container finished" podID="a47141a6-6da7-4e16-b6c5-299f5709caa6" containerID="4deeb75fc13633671f698acc4a711bc669b767ee363af61e8fb67ec940977b9a" exitCode=0 Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.177442 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-670a-account-create-dtqhk" event={"ID":"a47141a6-6da7-4e16-b6c5-299f5709caa6","Type":"ContainerDied","Data":"4deeb75fc13633671f698acc4a711bc669b767ee363af61e8fb67ec940977b9a"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.187309 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfb9d8bf8-tjb9d" event={"ID":"b3246e29-bb2d-46c3-8d7f-3dec2e353e41","Type":"ContainerStarted","Data":"78398445b8e7bfb04472702d7713c57265db0714a60254fbea76db24d4d62c49"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.187369 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfb9d8bf8-tjb9d" event={"ID":"b3246e29-bb2d-46c3-8d7f-3dec2e353e41","Type":"ContainerStarted","Data":"e919adb8a44cb6ca0930a1f361a90dc108e936104f9720da3a7a49cd2d6bf57a"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.192252 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7ccc6f5856-tt6gw" podStartSLOduration=11.192229017 podStartE2EDuration="11.192229017s" podCreationTimestamp="2025-11-11 13:57:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:37.189545662 +0000 UTC m=+1667.849835301" watchObservedRunningTime="2025-11-11 13:57:37.192229017 +0000 UTC m=+1667.852518636" Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.198711 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"500b7ba2-c2f9-4928-97cd-3b3d234625bc","Type":"ContainerStarted","Data":"16156a15744ab4caa045d503f64c73f6204fc34a7a5e1c0014514c495fa0cea1"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.198792 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/watcher-api-0" event={"ID":"500b7ba2-c2f9-4928-97cd-3b3d234625bc","Type":"ContainerStarted","Data":"9f1eb26708d952d3a57897ca5151ac0c667140743db2e068c1024680b8037628"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.200116 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.201745 4842 generic.go:334] "Generic (PLEG): container finished" podID="5d1e700d-ece9-4398-9bfe-d36b8fe07607" containerID="97359c39304da59018ff5d5dac439fbfb1c093b7f3916d3caa72a2ca01e10f54" exitCode=0 Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.202283 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-798f-account-create-j4bvf" event={"ID":"5d1e700d-ece9-4398-9bfe-d36b8fe07607","Type":"ContainerDied","Data":"97359c39304da59018ff5d5dac439fbfb1c093b7f3916d3caa72a2ca01e10f54"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.222910 4842 generic.go:334] "Generic (PLEG): container finished" podID="1fcd2f4b-fd39-425d-a3c8-382a5020d38b" containerID="39f2c30ec1a9d41eb0871cc2ef9399a52b8dcbeb6a84f6d9b45e364e2824824f" exitCode=0 Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.222955 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-32ac-account-create-dtmwr" event={"ID":"1fcd2f4b-fd39-425d-a3c8-382a5020d38b","Type":"ContainerDied","Data":"39f2c30ec1a9d41eb0871cc2ef9399a52b8dcbeb6a84f6d9b45e364e2824824f"} Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.269230 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.275212 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6dfb9d8bf8-tjb9d" podStartSLOduration=11.275189982 podStartE2EDuration="11.275189982s" podCreationTimestamp="2025-11-11 13:57:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:37.240846271 +0000 UTC m=+1667.901135890" watchObservedRunningTime="2025-11-11 13:57:37.275189982 +0000 UTC m=+1667.935479601" Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.287571 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-547zg" podStartSLOduration=2.287548454 podStartE2EDuration="2.287548454s" podCreationTimestamp="2025-11-11 13:57:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:37.269369217 +0000 UTC m=+1667.929658836" watchObservedRunningTime="2025-11-11 13:57:37.287548454 +0000 UTC m=+1667.947838073" Nov 11 13:57:37 crc kubenswrapper[4842]: I1111 13:57:37.351037 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=4.35101994 podStartE2EDuration="4.35101994s" podCreationTimestamp="2025-11-11 13:57:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:37.31576991 +0000 UTC m=+1667.976059549" watchObservedRunningTime="2025-11-11 13:57:37.35101994 +0000 UTC m=+1668.011309559" Nov 11 13:57:38 crc kubenswrapper[4842]: I1111 13:57:38.968823 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8447565c-vnzdx" 
podUID="fc8b995c-8577-436c-8341-2cc8e9094a10" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.135:5353: i/o timeout" Nov 11 13:57:39 crc kubenswrapper[4842]: I1111 13:57:39.000294 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 11 13:57:39 crc kubenswrapper[4842]: I1111 13:57:39.242472 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:57:39 crc kubenswrapper[4842]: I1111 13:57:39.814059 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:57:39 crc kubenswrapper[4842]: I1111 13:57:39.892758 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 11 13:57:42 crc kubenswrapper[4842]: I1111 13:57:42.276335 4842 generic.go:334] "Generic (PLEG): container finished" podID="48dceb55-6e5e-400b-a9fa-026a0c06bd41" containerID="230ea59936243c04daedf5ca034427af9fa2f2ac7f4bc6d8a5abf11e59f059d1" exitCode=0 Nov 11 13:57:42 crc kubenswrapper[4842]: I1111 13:57:42.276630 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-547zg" event={"ID":"48dceb55-6e5e-400b-a9fa-026a0c06bd41","Type":"ContainerDied","Data":"230ea59936243c04daedf5ca034427af9fa2f2ac7f4bc6d8a5abf11e59f059d1"} Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.050864 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-32ac-account-create-dtmwr" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.057470 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-670a-account-create-dtqhk" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.076422 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-798f-account-create-j4bvf" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.231092 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rm9sz\" (UniqueName: \"kubernetes.io/projected/1fcd2f4b-fd39-425d-a3c8-382a5020d38b-kube-api-access-rm9sz\") pod \"1fcd2f4b-fd39-425d-a3c8-382a5020d38b\" (UID: \"1fcd2f4b-fd39-425d-a3c8-382a5020d38b\") " Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.231203 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcrzp\" (UniqueName: \"kubernetes.io/projected/a47141a6-6da7-4e16-b6c5-299f5709caa6-kube-api-access-qcrzp\") pod \"a47141a6-6da7-4e16-b6c5-299f5709caa6\" (UID: \"a47141a6-6da7-4e16-b6c5-299f5709caa6\") " Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.231281 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-777jm\" (UniqueName: \"kubernetes.io/projected/5d1e700d-ece9-4398-9bfe-d36b8fe07607-kube-api-access-777jm\") pod \"5d1e700d-ece9-4398-9bfe-d36b8fe07607\" (UID: \"5d1e700d-ece9-4398-9bfe-d36b8fe07607\") " Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.237537 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fcd2f4b-fd39-425d-a3c8-382a5020d38b-kube-api-access-rm9sz" (OuterVolumeSpecName: "kube-api-access-rm9sz") pod "1fcd2f4b-fd39-425d-a3c8-382a5020d38b" (UID: "1fcd2f4b-fd39-425d-a3c8-382a5020d38b"). InnerVolumeSpecName "kube-api-access-rm9sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.237603 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d1e700d-ece9-4398-9bfe-d36b8fe07607-kube-api-access-777jm" (OuterVolumeSpecName: "kube-api-access-777jm") pod "5d1e700d-ece9-4398-9bfe-d36b8fe07607" (UID: "5d1e700d-ece9-4398-9bfe-d36b8fe07607"). InnerVolumeSpecName "kube-api-access-777jm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.242302 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a47141a6-6da7-4e16-b6c5-299f5709caa6-kube-api-access-qcrzp" (OuterVolumeSpecName: "kube-api-access-qcrzp") pod "a47141a6-6da7-4e16-b6c5-299f5709caa6" (UID: "a47141a6-6da7-4e16-b6c5-299f5709caa6"). InnerVolumeSpecName "kube-api-access-qcrzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.290880 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-798f-account-create-j4bvf" event={"ID":"5d1e700d-ece9-4398-9bfe-d36b8fe07607","Type":"ContainerDied","Data":"5b30177ced352a1c5811a976366107a55a891bae3533ab9c67a51a04e7033f47"} Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.290923 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b30177ced352a1c5811a976366107a55a891bae3533ab9c67a51a04e7033f47" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.290981 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-798f-account-create-j4bvf" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.295587 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-32ac-account-create-dtmwr" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.295655 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-32ac-account-create-dtmwr" event={"ID":"1fcd2f4b-fd39-425d-a3c8-382a5020d38b","Type":"ContainerDied","Data":"76d99e0ce3a25cf6555d5e4c21dca7a191cbf66974f325e07dcfab63a91dc840"} Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.295703 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76d99e0ce3a25cf6555d5e4c21dca7a191cbf66974f325e07dcfab63a91dc840" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.300672 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-670a-account-create-dtqhk" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.300791 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-670a-account-create-dtqhk" event={"ID":"a47141a6-6da7-4e16-b6c5-299f5709caa6","Type":"ContainerDied","Data":"0cb8ebb984cf99efb975405a204ce4bbfdb4ab2f96e1eb6a7a80cbd618882372"} Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.300825 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cb8ebb984cf99efb975405a204ce4bbfdb4ab2f96e1eb6a7a80cbd618882372" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.333883 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rm9sz\" (UniqueName: \"kubernetes.io/projected/1fcd2f4b-fd39-425d-a3c8-382a5020d38b-kube-api-access-rm9sz\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.333918 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcrzp\" (UniqueName: \"kubernetes.io/projected/a47141a6-6da7-4e16-b6c5-299f5709caa6-kube-api-access-qcrzp\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:43 crc kubenswrapper[4842]: I1111 13:57:43.333932 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-777jm\" (UniqueName: \"kubernetes.io/projected/5d1e700d-ece9-4398-9bfe-d36b8fe07607-kube-api-access-777jm\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.000062 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.006719 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.319976 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.961119 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.961502 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.961563 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.962484 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 13:57:44 crc kubenswrapper[4842]: I1111 13:57:44.962583 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" 
podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" gracePeriod=600 Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.122190 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.267279 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-config-data\") pod \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.267707 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-scripts\") pod \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.267890 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2scqw\" (UniqueName: \"kubernetes.io/projected/48dceb55-6e5e-400b-a9fa-026a0c06bd41-kube-api-access-2scqw\") pod \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.268020 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-credential-keys\") pod \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.268173 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-fernet-keys\") pod \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.268316 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-combined-ca-bundle\") pod \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\" (UID: \"48dceb55-6e5e-400b-a9fa-026a0c06bd41\") " Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.280241 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-scripts" (OuterVolumeSpecName: "scripts") pod "48dceb55-6e5e-400b-a9fa-026a0c06bd41" (UID: "48dceb55-6e5e-400b-a9fa-026a0c06bd41"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.280360 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48dceb55-6e5e-400b-a9fa-026a0c06bd41-kube-api-access-2scqw" (OuterVolumeSpecName: "kube-api-access-2scqw") pod "48dceb55-6e5e-400b-a9fa-026a0c06bd41" (UID: "48dceb55-6e5e-400b-a9fa-026a0c06bd41"). InnerVolumeSpecName "kube-api-access-2scqw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.280420 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "48dceb55-6e5e-400b-a9fa-026a0c06bd41" (UID: "48dceb55-6e5e-400b-a9fa-026a0c06bd41"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.281059 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "48dceb55-6e5e-400b-a9fa-026a0c06bd41" (UID: "48dceb55-6e5e-400b-a9fa-026a0c06bd41"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.299226 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-config-data" (OuterVolumeSpecName: "config-data") pod "48dceb55-6e5e-400b-a9fa-026a0c06bd41" (UID: "48dceb55-6e5e-400b-a9fa-026a0c06bd41"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.299948 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "48dceb55-6e5e-400b-a9fa-026a0c06bd41" (UID: "48dceb55-6e5e-400b-a9fa-026a0c06bd41"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.332884 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" exitCode=0 Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.332925 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d"} Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.332997 4842 scope.go:117] "RemoveContainer" containerID="3cf43482baec1eed99bfa20a2dd7fb680f42c7e2fc6a6ff6e8095af8a95ac4ec" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.336385 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-547zg" event={"ID":"48dceb55-6e5e-400b-a9fa-026a0c06bd41","Type":"ContainerDied","Data":"446f1552aab2828e41ed652d63bc01aa27dd1458646386984070f9f5b2edf2b0"} Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.336445 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="446f1552aab2828e41ed652d63bc01aa27dd1458646386984070f9f5b2edf2b0" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.336571 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-547zg" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.371917 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.371965 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2scqw\" (UniqueName: \"kubernetes.io/projected/48dceb55-6e5e-400b-a9fa-026a0c06bd41-kube-api-access-2scqw\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.371980 4842 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.371996 4842 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.372009 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:45 crc kubenswrapper[4842]: I1111 13:57:45.372019 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48dceb55-6e5e-400b-a9fa-026a0c06bd41-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323037 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7c6ccd957-kmlcp"] Nov 11 13:57:46 crc kubenswrapper[4842]: E1111 13:57:46.323505 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d1e700d-ece9-4398-9bfe-d36b8fe07607" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323524 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d1e700d-ece9-4398-9bfe-d36b8fe07607" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: E1111 13:57:46.323542 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48dceb55-6e5e-400b-a9fa-026a0c06bd41" containerName="keystone-bootstrap" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323551 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="48dceb55-6e5e-400b-a9fa-026a0c06bd41" containerName="keystone-bootstrap" Nov 11 13:57:46 crc kubenswrapper[4842]: E1111 13:57:46.323572 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a47141a6-6da7-4e16-b6c5-299f5709caa6" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323580 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a47141a6-6da7-4e16-b6c5-299f5709caa6" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: E1111 13:57:46.323597 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fcd2f4b-fd39-425d-a3c8-382a5020d38b" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323604 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fcd2f4b-fd39-425d-a3c8-382a5020d38b" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323842 4842 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="a47141a6-6da7-4e16-b6c5-299f5709caa6" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323864 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fcd2f4b-fd39-425d-a3c8-382a5020d38b" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323882 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d1e700d-ece9-4398-9bfe-d36b8fe07607" containerName="mariadb-account-create" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.323890 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="48dceb55-6e5e-400b-a9fa-026a0c06bd41" containerName="keystone-bootstrap" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.324766 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.327276 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.327616 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.327810 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.328056 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.328268 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.328414 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rxh4x" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.344264 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c6ccd957-kmlcp"] Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.450296 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-6xg8p"] Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.451988 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.456196 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rzk8r" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.456402 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.459087 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-6xg8p"] Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.492384 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-scripts\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.492449 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-internal-tls-certs\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.492584 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrcxc\" (UniqueName: \"kubernetes.io/projected/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-kube-api-access-qrcxc\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.492825 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-public-tls-certs\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.492949 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-config-data\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.493036 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-combined-ca-bundle\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.493225 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-credential-keys\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.493389 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-fernet-keys\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.495678 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.496263 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.497598 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6dfb9d8bf8-tjb9d" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.154:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.154:8443: connect: connection refused" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.586528 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.586603 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.589018 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7ccc6f5856-tt6gw" podUID="dde43d8b-9a6f-4506-9285-0606a6e04361" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.156:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.156:8443: connect: connection refused" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.595658 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-public-tls-certs\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.595726 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-config-data\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.595785 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-combined-ca-bundle\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.595866 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-credential-keys\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.595897 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-config-data\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " 
pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.595922 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-db-sync-config-data\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.596653 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-fernet-keys\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.596698 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-combined-ca-bundle\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.596727 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-scripts\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.596766 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-internal-tls-certs\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.596852 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrcxc\" (UniqueName: \"kubernetes.io/projected/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-kube-api-access-qrcxc\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.596881 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm9cm\" (UniqueName: \"kubernetes.io/projected/c61f37c1-6c58-4ae1-a127-2238733058b4-kube-api-access-vm9cm\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.605589 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-scripts\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.605865 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-internal-tls-certs\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.612521 
4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-config-data\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.613734 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-combined-ca-bundle\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.623807 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-fernet-keys\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.632160 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-credential-keys\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.634676 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-public-tls-certs\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.635014 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrcxc\" (UniqueName: \"kubernetes.io/projected/e5ae2f35-b0d7-480b-8f4d-cda875e63ec2-kube-api-access-qrcxc\") pod \"keystone-7c6ccd957-kmlcp\" (UID: \"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2\") " pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.647215 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-wnl6v"] Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.648739 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.651662 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.652071 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.654471 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rqm4m" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.667977 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.685707 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-wnl6v"] Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.699068 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm9cm\" (UniqueName: \"kubernetes.io/projected/c61f37c1-6c58-4ae1-a127-2238733058b4-kube-api-access-vm9cm\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.699247 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-config-data\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.699277 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-db-sync-config-data\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.699359 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-combined-ca-bundle\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.705918 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-combined-ca-bundle\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.707650 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-config-data\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.710731 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-db-sync-config-data\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.718765 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm9cm\" (UniqueName: \"kubernetes.io/projected/c61f37c1-6c58-4ae1-a127-2238733058b4-kube-api-access-vm9cm\") pod \"glance-db-sync-6xg8p\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.772709 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-6xg8p" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.802033 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-combined-ca-bundle\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.802231 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw2ls\" (UniqueName: \"kubernetes.io/projected/a02fa3d1-7142-425d-b514-0a647bfda1ca-kube-api-access-hw2ls\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.802323 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-config\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.903450 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw2ls\" (UniqueName: \"kubernetes.io/projected/a02fa3d1-7142-425d-b514-0a647bfda1ca-kube-api-access-hw2ls\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.903524 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-config\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.903611 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-combined-ca-bundle\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.914754 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-combined-ca-bundle\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.928240 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw2ls\" (UniqueName: \"kubernetes.io/projected/a02fa3d1-7142-425d-b514-0a647bfda1ca-kube-api-access-hw2ls\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:46 crc kubenswrapper[4842]: I1111 13:57:46.936164 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-config\") pod \"neutron-db-sync-wnl6v\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:47 crc kubenswrapper[4842]: I1111 13:57:47.078007 4842 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:57:47 crc kubenswrapper[4842]: I1111 13:57:47.680744 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:47 crc kubenswrapper[4842]: I1111 13:57:47.681022 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api-log" containerID="cri-o://9f1eb26708d952d3a57897ca5151ac0c667140743db2e068c1024680b8037628" gracePeriod=30 Nov 11 13:57:47 crc kubenswrapper[4842]: I1111 13:57:47.681141 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api" containerID="cri-o://16156a15744ab4caa045d503f64c73f6204fc34a7a5e1c0014514c495fa0cea1" gracePeriod=30 Nov 11 13:57:48 crc kubenswrapper[4842]: I1111 13:57:48.372915 4842 generic.go:334] "Generic (PLEG): container finished" podID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerID="9f1eb26708d952d3a57897ca5151ac0c667140743db2e068c1024680b8037628" exitCode=143 Nov 11 13:57:48 crc kubenswrapper[4842]: I1111 13:57:48.373172 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"500b7ba2-c2f9-4928-97cd-3b3d234625bc","Type":"ContainerDied","Data":"9f1eb26708d952d3a57897ca5151ac0c667140743db2e068c1024680b8037628"} Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.000752 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9322/\": dial tcp 10.217.0.158:9322: connect: connection refused" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.000782 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.158:9322/\": dial tcp 10.217.0.158:9322: connect: connection refused" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.059270 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-7nz95"] Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.060744 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.066701 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-f6j7v" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.066818 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.076252 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-7nz95"] Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.163725 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-db-sync-config-data\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.164135 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-combined-ca-bundle\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.164201 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwxll\" (UniqueName: \"kubernetes.io/projected/ae52071f-3664-4aac-8657-3351df5c6fff-kube-api-access-dwxll\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.268784 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-db-sync-config-data\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.268921 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-combined-ca-bundle\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.268962 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwxll\" (UniqueName: \"kubernetes.io/projected/ae52071f-3664-4aac-8657-3351df5c6fff-kube-api-access-dwxll\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.274993 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-combined-ca-bundle\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.276959 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-db-sync-config-data\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.284903 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwxll\" (UniqueName: \"kubernetes.io/projected/ae52071f-3664-4aac-8657-3351df5c6fff-kube-api-access-dwxll\") pod \"barbican-db-sync-7nz95\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.383843 4842 generic.go:334] "Generic (PLEG): container finished" podID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerID="16156a15744ab4caa045d503f64c73f6204fc34a7a5e1c0014514c495fa0cea1" exitCode=0 Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.383914 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"500b7ba2-c2f9-4928-97cd-3b3d234625bc","Type":"ContainerDied","Data":"16156a15744ab4caa045d503f64c73f6204fc34a7a5e1c0014514c495fa0cea1"} Nov 11 13:57:49 crc kubenswrapper[4842]: I1111 13:57:49.434975 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-7nz95" Nov 11 13:57:54 crc kubenswrapper[4842]: I1111 13:57:54.000385 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.158:9322/\": dial tcp 10.217.0.158:9322: connect: connection refused" Nov 11 13:57:54 crc kubenswrapper[4842]: I1111 13:57:54.000421 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9322/\": dial tcp 10.217.0.158:9322: connect: connection refused" Nov 11 13:57:55 crc kubenswrapper[4842]: E1111 13:57:55.839762 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:57:56 crc kubenswrapper[4842]: I1111 13:57:56.440487 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:57:56 crc kubenswrapper[4842]: E1111 13:57:56.441136 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:57:56 crc kubenswrapper[4842]: E1111 13:57:56.928607 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Nov 11 13:57:56 crc kubenswrapper[4842]: E1111 13:57:56.929079 4842 kuberuntime_image.go:55] 
"Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.132:5001/podified-master-centos10/openstack-cinder-api:watcher_latest" Nov 11 13:57:56 crc kubenswrapper[4842]: E1111 13:57:56.933926 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:38.102.83.132:5001/podified-master-centos10/openstack-cinder-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2ph7j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-r24nh_openstack(69d5134b-7c5b-40d9-bcbd-a1bd368a358d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 11 13:57:56 crc kubenswrapper[4842]: E1111 13:57:56.935217 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-r24nh" podUID="69d5134b-7c5b-40d9-bcbd-a1bd368a358d" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.290506 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.325440 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-combined-ca-bundle\") pod \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.325516 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-config-data\") pod \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.325554 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-custom-prometheus-ca\") pod \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.325612 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/500b7ba2-c2f9-4928-97cd-3b3d234625bc-logs\") pod \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.325651 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59l8j\" (UniqueName: \"kubernetes.io/projected/500b7ba2-c2f9-4928-97cd-3b3d234625bc-kube-api-access-59l8j\") pod \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\" (UID: \"500b7ba2-c2f9-4928-97cd-3b3d234625bc\") " Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.327476 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/500b7ba2-c2f9-4928-97cd-3b3d234625bc-logs" (OuterVolumeSpecName: "logs") pod "500b7ba2-c2f9-4928-97cd-3b3d234625bc" (UID: "500b7ba2-c2f9-4928-97cd-3b3d234625bc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.358954 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/500b7ba2-c2f9-4928-97cd-3b3d234625bc-kube-api-access-59l8j" (OuterVolumeSpecName: "kube-api-access-59l8j") pod "500b7ba2-c2f9-4928-97cd-3b3d234625bc" (UID: "500b7ba2-c2f9-4928-97cd-3b3d234625bc"). InnerVolumeSpecName "kube-api-access-59l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.427555 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59l8j\" (UniqueName: \"kubernetes.io/projected/500b7ba2-c2f9-4928-97cd-3b3d234625bc-kube-api-access-59l8j\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.427806 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/500b7ba2-c2f9-4928-97cd-3b3d234625bc-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.447282 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "500b7ba2-c2f9-4928-97cd-3b3d234625bc" (UID: "500b7ba2-c2f9-4928-97cd-3b3d234625bc"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.450846 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "500b7ba2-c2f9-4928-97cd-3b3d234625bc" (UID: "500b7ba2-c2f9-4928-97cd-3b3d234625bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.474221 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerStarted","Data":"42376c60ee02b8662c5a8b1696057b7eee0296cb4d5ee564c4df4b7d2cc6bce0"} Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.480143 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.480313 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"500b7ba2-c2f9-4928-97cd-3b3d234625bc","Type":"ContainerDied","Data":"0cd1e39ca91839694922b9914988c2bd46d274528e527d50fd043764240b4605"} Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.480344 4842 scope.go:117] "RemoveContainer" containerID="16156a15744ab4caa045d503f64c73f6204fc34a7a5e1c0014514c495fa0cea1" Nov 11 13:57:57 crc kubenswrapper[4842]: E1111 13:57:57.495031 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.132:5001/podified-master-centos10/openstack-cinder-api:watcher_latest\\\"\"" pod="openstack/cinder-db-sync-r24nh" podUID="69d5134b-7c5b-40d9-bcbd-a1bd368a358d" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.509508 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=4.447798983 podStartE2EDuration="24.509485674s" podCreationTimestamp="2025-11-11 13:57:33 +0000 UTC" firstStartedPulling="2025-11-11 13:57:35.751002736 +0000 UTC m=+1666.411292355" lastFinishedPulling="2025-11-11 13:57:55.812689427 +0000 UTC m=+1686.472979046" observedRunningTime="2025-11-11 13:57:57.496309806 +0000 UTC m=+1688.156599435" watchObservedRunningTime="2025-11-11 13:57:57.509485674 +0000 UTC m=+1688.169775283" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.530369 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.530400 4842 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.539539 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-config-data" (OuterVolumeSpecName: "config-data") pod "500b7ba2-c2f9-4928-97cd-3b3d234625bc" (UID: "500b7ba2-c2f9-4928-97cd-3b3d234625bc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.552226 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=4.720175082 podStartE2EDuration="24.552205331s" podCreationTimestamp="2025-11-11 13:57:33 +0000 UTC" firstStartedPulling="2025-11-11 13:57:36.007007205 +0000 UTC m=+1666.667296814" lastFinishedPulling="2025-11-11 13:57:55.839037444 +0000 UTC m=+1686.499327063" observedRunningTime="2025-11-11 13:57:57.541676406 +0000 UTC m=+1688.201966025" watchObservedRunningTime="2025-11-11 13:57:57.552205331 +0000 UTC m=+1688.212494950" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.556992 4842 scope.go:117] "RemoveContainer" containerID="9f1eb26708d952d3a57897ca5151ac0c667140743db2e068c1024680b8037628" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.631424 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/500b7ba2-c2f9-4928-97cd-3b3d234625bc-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.771393 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-wnl6v"] Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.779147 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c6ccd957-kmlcp"] Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.796189 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-7nz95"] Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.852166 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.869745 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.898265 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:57 crc kubenswrapper[4842]: E1111 13:57:57.904633 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.904681 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api" Nov 11 13:57:57 crc kubenswrapper[4842]: E1111 13:57:57.904723 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api-log" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.904731 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api-log" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.904958 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api-log" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.904995 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" containerName="watcher-api" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.906436 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.909194 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.909394 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.911786 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.931409 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.942218 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.942536 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5n88\" (UniqueName: \"kubernetes.io/projected/bad883a9-7045-46c2-8358-aa3a6d8f7f01-kube-api-access-l5n88\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.942613 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.942669 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-public-tls-certs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.942690 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-config-data\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.942830 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bad883a9-7045-46c2-8358-aa3a6d8f7f01-logs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:57 crc kubenswrapper[4842]: I1111 13:57:57.943085 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.038860 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-6xg8p"] Nov 11 13:57:58 crc 
kubenswrapper[4842]: I1111 13:57:58.044308 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bad883a9-7045-46c2-8358-aa3a6d8f7f01-logs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.044384 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.044426 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.044446 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5n88\" (UniqueName: \"kubernetes.io/projected/bad883a9-7045-46c2-8358-aa3a6d8f7f01-kube-api-access-l5n88\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.044486 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.044512 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-public-tls-certs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.044530 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-config-data\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.046160 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bad883a9-7045-46c2-8358-aa3a6d8f7f01-logs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.050411 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.054251 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-config-data\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc 
kubenswrapper[4842]: I1111 13:57:58.054591 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-public-tls-certs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.061709 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.063661 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bad883a9-7045-46c2-8358-aa3a6d8f7f01-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.064723 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5n88\" (UniqueName: \"kubernetes.io/projected/bad883a9-7045-46c2-8358-aa3a6d8f7f01-kube-api-access-l5n88\") pod \"watcher-api-0\" (UID: \"bad883a9-7045-46c2-8358-aa3a6d8f7f01\") " pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.098176 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="500b7ba2-c2f9-4928-97cd-3b3d234625bc" path="/var/lib/kubelet/pods/500b7ba2-c2f9-4928-97cd-3b3d234625bc/volumes" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.343842 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.515481 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8908777-1ed6-42fa-8642-5c388d9f0b4e","Type":"ContainerStarted","Data":"2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.523269 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c6ccd957-kmlcp" event={"ID":"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2","Type":"ContainerStarted","Data":"dc44fb8dcaf748314aa04c6fad9b2242553b951712b27f05e6d29cb3fa913fb9"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.523310 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c6ccd957-kmlcp" event={"ID":"e5ae2f35-b0d7-480b-8f4d-cda875e63ec2","Type":"ContainerStarted","Data":"b95ebc5066b97a6ccd3e026511098b2321511cefda45172a8bed1fb0a8dd6131"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.523444 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.525720 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7nz95" event={"ID":"ae52071f-3664-4aac-8657-3351df5c6fff","Type":"ContainerStarted","Data":"d3d7be4e4600b9d246caa0278a8c7afe1e4351ffd5ab34590a07e820c8b30a53"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.528119 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wnl6v" event={"ID":"a02fa3d1-7142-425d-b514-0a647bfda1ca","Type":"ContainerStarted","Data":"ef4daab76392c0a6aeee495a68dc96653c8e12008714269f34a383934f5655c6"} Nov 11 13:57:58 
crc kubenswrapper[4842]: I1111 13:57:58.528149 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wnl6v" event={"ID":"a02fa3d1-7142-425d-b514-0a647bfda1ca","Type":"ContainerStarted","Data":"0e8d0c9ec5ac59d7ed676a1f4a466b3a0e1b11b49db50ecd123da49edee0cdc4"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.556033 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7c6ccd957-kmlcp" podStartSLOduration=12.556014003 podStartE2EDuration="12.556014003s" podCreationTimestamp="2025-11-11 13:57:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:58.547773882 +0000 UTC m=+1689.208063531" watchObservedRunningTime="2025-11-11 13:57:58.556014003 +0000 UTC m=+1689.216303622" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.561504 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"9f38d3ba-5c82-4503-a865-35767c1f1147","Type":"ContainerStarted","Data":"7e9c3850fe85d3a3b66874e618679e4aa4d84ea2d7c1f3b8f637749f431c52b4"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.577807 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-wnl6v" podStartSLOduration=12.577789364000001 podStartE2EDuration="12.577789364s" podCreationTimestamp="2025-11-11 13:57:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:57:58.574818721 +0000 UTC m=+1689.235108340" watchObservedRunningTime="2025-11-11 13:57:58.577789364 +0000 UTC m=+1689.238078983" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.590431 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nqmw2" event={"ID":"8a855b49-006b-47a5-a808-c1c3649473aa","Type":"ContainerStarted","Data":"207313ad3ea06d2b48be541f6e02378b43d76295e9964a536c060373dcd8c022"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.613351 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6xg8p" event={"ID":"c61f37c1-6c58-4ae1-a127-2238733058b4","Type":"ContainerStarted","Data":"6f0ae3796c9f78836a11f19c2009384e8cc7e4d3d083c5c613612346aa99306e"} Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.615847 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-nqmw2" podStartSLOduration=3.269012931 podStartE2EDuration="41.615827793s" podCreationTimestamp="2025-11-11 13:57:17 +0000 UTC" firstStartedPulling="2025-11-11 13:57:18.689547559 +0000 UTC m=+1649.349837178" lastFinishedPulling="2025-11-11 13:57:57.036362421 +0000 UTC m=+1687.696652040" observedRunningTime="2025-11-11 13:57:58.612086634 +0000 UTC m=+1689.272376263" watchObservedRunningTime="2025-11-11 13:57:58.615827793 +0000 UTC m=+1689.276117412" Nov 11 13:57:58 crc kubenswrapper[4842]: I1111 13:57:58.891048 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 11 13:57:59 crc kubenswrapper[4842]: I1111 13:57:59.080013 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Nov 11 13:57:59 crc kubenswrapper[4842]: I1111 13:57:59.304470 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:57:59 crc kubenswrapper[4842]: I1111 13:57:59.627115 4842 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bad883a9-7045-46c2-8358-aa3a6d8f7f01","Type":"ContainerStarted","Data":"e5860c711d3f82582084ac3c93de606de495e71430bdb443b4bbe6e6a84bb13b"} Nov 11 13:57:59 crc kubenswrapper[4842]: I1111 13:57:59.695228 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.441293 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.448564 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7ccc6f5856-tt6gw" Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.525464 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6dfb9d8bf8-tjb9d"] Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.645711 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bad883a9-7045-46c2-8358-aa3a6d8f7f01","Type":"ContainerStarted","Data":"a1bd819e19d8c8f7873c0bbcbe6b3aee9dfeb307ef446c97fdf1c0921d0d7e69"} Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.645761 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"bad883a9-7045-46c2-8358-aa3a6d8f7f01","Type":"ContainerStarted","Data":"eb4344d8890c0e861f920c7575169f2c0e19c11236e4f4638e213c9b810852f8"} Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.645967 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6dfb9d8bf8-tjb9d" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon-log" containerID="cri-o://e919adb8a44cb6ca0930a1f361a90dc108e936104f9720da3a7a49cd2d6bf57a" gracePeriod=30 Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.646128 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6dfb9d8bf8-tjb9d" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" containerID="cri-o://78398445b8e7bfb04472702d7713c57265db0714a60254fbea76db24d4d62c49" gracePeriod=30 Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.646299 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 11 13:58:01 crc kubenswrapper[4842]: I1111 13:58:01.672055 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=4.672036833 podStartE2EDuration="4.672036833s" podCreationTimestamp="2025-11-11 13:57:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:01.67066777 +0000 UTC m=+1692.330957389" watchObservedRunningTime="2025-11-11 13:58:01.672036833 +0000 UTC m=+1692.332326452" Nov 11 13:58:02 crc kubenswrapper[4842]: I1111 13:58:02.689005 4842 generic.go:334] "Generic (PLEG): container finished" podID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerID="78398445b8e7bfb04472702d7713c57265db0714a60254fbea76db24d4d62c49" exitCode=0 Nov 11 13:58:02 crc kubenswrapper[4842]: I1111 13:58:02.689212 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfb9d8bf8-tjb9d" event={"ID":"b3246e29-bb2d-46c3-8d7f-3dec2e353e41","Type":"ContainerDied","Data":"78398445b8e7bfb04472702d7713c57265db0714a60254fbea76db24d4d62c49"} Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 
13:58:03.298166 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qvgph"] Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.300212 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.324538 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qvgph"] Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.344332 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.365059 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-utilities\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.365219 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwckl\" (UniqueName: \"kubernetes.io/projected/5ac50166-974c-400f-a954-a1c40581322c-kube-api-access-xwckl\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.365254 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-catalog-content\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.466887 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwckl\" (UniqueName: \"kubernetes.io/projected/5ac50166-974c-400f-a954-a1c40581322c-kube-api-access-xwckl\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.466936 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-catalog-content\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.467017 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-utilities\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.467495 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-utilities\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.467564 4842 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-catalog-content\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.486970 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwckl\" (UniqueName: \"kubernetes.io/projected/5ac50166-974c-400f-a954-a1c40581322c-kube-api-access-xwckl\") pod \"certified-operators-qvgph\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.641634 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.699950 4842 generic.go:334] "Generic (PLEG): container finished" podID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerID="42376c60ee02b8662c5a8b1696057b7eee0296cb4d5ee564c4df4b7d2cc6bce0" exitCode=1 Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.700032 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerDied","Data":"42376c60ee02b8662c5a8b1696057b7eee0296cb4d5ee564c4df4b7d2cc6bce0"} Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.700679 4842 scope.go:117] "RemoveContainer" containerID="42376c60ee02b8662c5a8b1696057b7eee0296cb4d5ee564c4df4b7d2cc6bce0" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.705518 4842 generic.go:334] "Generic (PLEG): container finished" podID="8a855b49-006b-47a5-a808-c1c3649473aa" containerID="207313ad3ea06d2b48be541f6e02378b43d76295e9964a536c060373dcd8c022" exitCode=0 Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.705848 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:58:03 crc kubenswrapper[4842]: I1111 13:58:03.706451 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nqmw2" event={"ID":"8a855b49-006b-47a5-a808-c1c3649473aa","Type":"ContainerDied","Data":"207313ad3ea06d2b48be541f6e02378b43d76295e9964a536c060373dcd8c022"} Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.080045 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.107926 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.130834 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.130882 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.130895 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.130903 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.324093 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/watcher-api-0" Nov 11 13:58:04 crc kubenswrapper[4842]: I1111 13:58:04.784993 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.006572 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qvgph"] Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.141010 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-nqmw2" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.300624 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a855b49-006b-47a5-a808-c1c3649473aa-logs\") pod \"8a855b49-006b-47a5-a808-c1c3649473aa\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.300681 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-scripts\") pod \"8a855b49-006b-47a5-a808-c1c3649473aa\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.300792 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbb6n\" (UniqueName: \"kubernetes.io/projected/8a855b49-006b-47a5-a808-c1c3649473aa-kube-api-access-kbb6n\") pod \"8a855b49-006b-47a5-a808-c1c3649473aa\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.300902 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-config-data\") pod \"8a855b49-006b-47a5-a808-c1c3649473aa\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.300935 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-combined-ca-bundle\") pod \"8a855b49-006b-47a5-a808-c1c3649473aa\" (UID: \"8a855b49-006b-47a5-a808-c1c3649473aa\") " Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.302253 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a855b49-006b-47a5-a808-c1c3649473aa-logs" (OuterVolumeSpecName: "logs") pod "8a855b49-006b-47a5-a808-c1c3649473aa" (UID: "8a855b49-006b-47a5-a808-c1c3649473aa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.318177 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a855b49-006b-47a5-a808-c1c3649473aa-kube-api-access-kbb6n" (OuterVolumeSpecName: "kube-api-access-kbb6n") pod "8a855b49-006b-47a5-a808-c1c3649473aa" (UID: "8a855b49-006b-47a5-a808-c1c3649473aa"). InnerVolumeSpecName "kube-api-access-kbb6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.319561 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-scripts" (OuterVolumeSpecName: "scripts") pod "8a855b49-006b-47a5-a808-c1c3649473aa" (UID: "8a855b49-006b-47a5-a808-c1c3649473aa"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.368232 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-config-data" (OuterVolumeSpecName: "config-data") pod "8a855b49-006b-47a5-a808-c1c3649473aa" (UID: "8a855b49-006b-47a5-a808-c1c3649473aa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.385235 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a855b49-006b-47a5-a808-c1c3649473aa" (UID: "8a855b49-006b-47a5-a808-c1c3649473aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.406328 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a855b49-006b-47a5-a808-c1c3649473aa-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.406359 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.406369 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbb6n\" (UniqueName: \"kubernetes.io/projected/8a855b49-006b-47a5-a808-c1c3649473aa-kube-api-access-kbb6n\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.406379 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.406391 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a855b49-006b-47a5-a808-c1c3649473aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.753530 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7nz95" event={"ID":"ae52071f-3664-4aac-8657-3351df5c6fff","Type":"ContainerStarted","Data":"8765db9bdef24aa4b3ad1ae04cc6cb698429f237b11c40b9d8c1c1e343fa2f25"} Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.787612 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerStarted","Data":"a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9"} Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.787693 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-7nz95" podStartSLOduration=10.039588577 podStartE2EDuration="16.787676343s" podCreationTimestamp="2025-11-11 13:57:49 +0000 UTC" firstStartedPulling="2025-11-11 13:57:57.815582363 +0000 UTC m=+1688.475871982" lastFinishedPulling="2025-11-11 13:58:04.563670129 +0000 UTC m=+1695.223959748" observedRunningTime="2025-11-11 13:58:05.774870037 +0000 UTC m=+1696.435159656" watchObservedRunningTime="2025-11-11 13:58:05.787676343 +0000 UTC m=+1696.447965952" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.790447 4842 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nqmw2" event={"ID":"8a855b49-006b-47a5-a808-c1c3649473aa","Type":"ContainerDied","Data":"b4b4126e6cb8ac477836d0346338eb3a71da3749a0f1e3686e60acb2c657d081"} Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.790499 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4b4126e6cb8ac477836d0346338eb3a71da3749a0f1e3686e60acb2c657d081" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.790562 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-nqmw2" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.793707 4842 generic.go:334] "Generic (PLEG): container finished" podID="5ac50166-974c-400f-a954-a1c40581322c" containerID="4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543" exitCode=0 Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.795568 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvgph" event={"ID":"5ac50166-974c-400f-a954-a1c40581322c","Type":"ContainerDied","Data":"4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543"} Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.795621 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvgph" event={"ID":"5ac50166-974c-400f-a954-a1c40581322c","Type":"ContainerStarted","Data":"8d5bee87097ff23a11e31cb3bac403803fa2095ee6e0b66518778da6f9d96aa5"} Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.867403 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-94c6f6d9b-ns8g4"] Nov 11 13:58:05 crc kubenswrapper[4842]: E1111 13:58:05.868745 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a855b49-006b-47a5-a808-c1c3649473aa" containerName="placement-db-sync" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.868768 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a855b49-006b-47a5-a808-c1c3649473aa" containerName="placement-db-sync" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.869504 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a855b49-006b-47a5-a808-c1c3649473aa" containerName="placement-db-sync" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.871732 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.883756 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.885351 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.885452 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.888936 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-ldgr6" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.889245 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 11 13:58:05 crc kubenswrapper[4842]: I1111 13:58:05.913484 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-94c6f6d9b-ns8g4"] Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.020227 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-combined-ca-bundle\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.020286 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-config-data\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.020344 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-scripts\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.020392 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-public-tls-certs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.020417 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2494da24-b74f-4317-8bf2-80e0335c5648-logs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.020485 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-internal-tls-certs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.020538 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65496\" (UniqueName: \"kubernetes.io/projected/2494da24-b74f-4317-8bf2-80e0335c5648-kube-api-access-65496\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.121898 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-combined-ca-bundle\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.121959 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-config-data\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.122000 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-scripts\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.122031 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-public-tls-certs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.122061 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2494da24-b74f-4317-8bf2-80e0335c5648-logs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.122121 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-internal-tls-certs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.122181 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65496\" (UniqueName: \"kubernetes.io/projected/2494da24-b74f-4317-8bf2-80e0335c5648-kube-api-access-65496\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.124510 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2494da24-b74f-4317-8bf2-80e0335c5648-logs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.132329 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-scripts\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.132763 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-internal-tls-certs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.132819 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-combined-ca-bundle\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.137646 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-public-tls-certs\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.138534 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2494da24-b74f-4317-8bf2-80e0335c5648-config-data\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.143602 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65496\" (UniqueName: \"kubernetes.io/projected/2494da24-b74f-4317-8bf2-80e0335c5648-kube-api-access-65496\") pod \"placement-94c6f6d9b-ns8g4\" (UID: \"2494da24-b74f-4317-8bf2-80e0335c5648\") " pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.150311 4842 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod4c8ef126-60a0-4a3d-8d1f-8207a8fa684f"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod4c8ef126-60a0-4a3d-8d1f-8207a8fa684f] : Timed out while waiting for systemd to remove kubepods-besteffort-pod4c8ef126_60a0_4a3d_8d1f_8207a8fa684f.slice" Nov 11 13:58:06 crc kubenswrapper[4842]: E1111 13:58:06.150416 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod4c8ef126-60a0-4a3d-8d1f-8207a8fa684f] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod4c8ef126-60a0-4a3d-8d1f-8207a8fa684f] : Timed out while waiting for systemd to remove kubepods-besteffort-pod4c8ef126_60a0_4a3d_8d1f_8207a8fa684f.slice" pod="openstack/horizon-86c87df95f-24v9b" podUID="4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.223260 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.501682 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6dfb9d8bf8-tjb9d" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.154:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.154:8443: connect: connection refused" Nov 11 13:58:06 crc kubenswrapper[4842]: E1111 13:58:06.563897 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9691e8b_e9db_41cf_b455_a3a9219b2d56.slice/crio-a3350effa1849593c188722ef00592ac4f29bebef372ad1d28676bda87287c14.scope\": RecentStats: unable to find data in memory cache]" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.801124 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-94c6f6d9b-ns8g4"] Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.842424 4842 generic.go:334] "Generic (PLEG): container finished" podID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerID="b4ab738ee51ab22fb123e94ee08d898782bb15abe34cf3d94cf40c0fd32a4ff5" exitCode=137 Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.842482 4842 generic.go:334] "Generic (PLEG): container finished" podID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerID="3c7f7187d95201e58f7bc3338b45c3d5c3eb2547c4f721f2da0b227915a20661" exitCode=137 Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.842572 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4d859fc9-zzhzr" event={"ID":"45a26782-560c-4aa9-b5cc-301abaea98ee","Type":"ContainerDied","Data":"b4ab738ee51ab22fb123e94ee08d898782bb15abe34cf3d94cf40c0fd32a4ff5"} Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.842666 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4d859fc9-zzhzr" event={"ID":"45a26782-560c-4aa9-b5cc-301abaea98ee","Type":"ContainerDied","Data":"3c7f7187d95201e58f7bc3338b45c3d5c3eb2547c4f721f2da0b227915a20661"} Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.846330 4842 generic.go:334] "Generic (PLEG): container finished" podID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerID="b8fe7eae886c463be84f2e51259f1e8ee851c5acf575e0c0961ffff66aebacf5" exitCode=137 Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.846361 4842 generic.go:334] "Generic (PLEG): container finished" podID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerID="a3350effa1849593c188722ef00592ac4f29bebef372ad1d28676bda87287c14" exitCode=137 Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.847239 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86c87df95f-24v9b" Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.847806 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c86bb574c-djp9j" event={"ID":"b9691e8b-e9db-41cf-b455-a3a9219b2d56","Type":"ContainerDied","Data":"b8fe7eae886c463be84f2e51259f1e8ee851c5acf575e0c0961ffff66aebacf5"} Nov 11 13:58:06 crc kubenswrapper[4842]: I1111 13:58:06.847841 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c86bb574c-djp9j" event={"ID":"b9691e8b-e9db-41cf-b455-a3a9219b2d56","Type":"ContainerDied","Data":"a3350effa1849593c188722ef00592ac4f29bebef372ad1d28676bda87287c14"} Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.087555 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.110882 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.122640 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-86c87df95f-24v9b"] Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.136013 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-86c87df95f-24v9b"] Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241657 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45a26782-560c-4aa9-b5cc-301abaea98ee-logs\") pod \"45a26782-560c-4aa9-b5cc-301abaea98ee\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241754 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b9691e8b-e9db-41cf-b455-a3a9219b2d56-horizon-secret-key\") pod \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241792 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-scripts\") pod \"45a26782-560c-4aa9-b5cc-301abaea98ee\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241843 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jc9hp\" (UniqueName: \"kubernetes.io/projected/b9691e8b-e9db-41cf-b455-a3a9219b2d56-kube-api-access-jc9hp\") pod \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241894 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9691e8b-e9db-41cf-b455-a3a9219b2d56-logs\") pod \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241925 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6mwg\" (UniqueName: \"kubernetes.io/projected/45a26782-560c-4aa9-b5cc-301abaea98ee-kube-api-access-r6mwg\") pod \"45a26782-560c-4aa9-b5cc-301abaea98ee\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241955 4842 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-scripts\") pod \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.241991 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/45a26782-560c-4aa9-b5cc-301abaea98ee-horizon-secret-key\") pod \"45a26782-560c-4aa9-b5cc-301abaea98ee\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.242020 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-config-data\") pod \"45a26782-560c-4aa9-b5cc-301abaea98ee\" (UID: \"45a26782-560c-4aa9-b5cc-301abaea98ee\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.242040 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-config-data\") pod \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\" (UID: \"b9691e8b-e9db-41cf-b455-a3a9219b2d56\") " Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.242831 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9691e8b-e9db-41cf-b455-a3a9219b2d56-logs" (OuterVolumeSpecName: "logs") pod "b9691e8b-e9db-41cf-b455-a3a9219b2d56" (UID: "b9691e8b-e9db-41cf-b455-a3a9219b2d56"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.243211 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45a26782-560c-4aa9-b5cc-301abaea98ee-logs" (OuterVolumeSpecName: "logs") pod "45a26782-560c-4aa9-b5cc-301abaea98ee" (UID: "45a26782-560c-4aa9-b5cc-301abaea98ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.248639 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a26782-560c-4aa9-b5cc-301abaea98ee-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "45a26782-560c-4aa9-b5cc-301abaea98ee" (UID: "45a26782-560c-4aa9-b5cc-301abaea98ee"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.251357 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9691e8b-e9db-41cf-b455-a3a9219b2d56-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b9691e8b-e9db-41cf-b455-a3a9219b2d56" (UID: "b9691e8b-e9db-41cf-b455-a3a9219b2d56"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.251704 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45a26782-560c-4aa9-b5cc-301abaea98ee-kube-api-access-r6mwg" (OuterVolumeSpecName: "kube-api-access-r6mwg") pod "45a26782-560c-4aa9-b5cc-301abaea98ee" (UID: "45a26782-560c-4aa9-b5cc-301abaea98ee"). InnerVolumeSpecName "kube-api-access-r6mwg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.254763 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9691e8b-e9db-41cf-b455-a3a9219b2d56-kube-api-access-jc9hp" (OuterVolumeSpecName: "kube-api-access-jc9hp") pod "b9691e8b-e9db-41cf-b455-a3a9219b2d56" (UID: "b9691e8b-e9db-41cf-b455-a3a9219b2d56"). InnerVolumeSpecName "kube-api-access-jc9hp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.269741 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-scripts" (OuterVolumeSpecName: "scripts") pod "b9691e8b-e9db-41cf-b455-a3a9219b2d56" (UID: "b9691e8b-e9db-41cf-b455-a3a9219b2d56"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.271627 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-config-data" (OuterVolumeSpecName: "config-data") pod "b9691e8b-e9db-41cf-b455-a3a9219b2d56" (UID: "b9691e8b-e9db-41cf-b455-a3a9219b2d56"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.277982 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-config-data" (OuterVolumeSpecName: "config-data") pod "45a26782-560c-4aa9-b5cc-301abaea98ee" (UID: "45a26782-560c-4aa9-b5cc-301abaea98ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.278637 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-scripts" (OuterVolumeSpecName: "scripts") pod "45a26782-560c-4aa9-b5cc-301abaea98ee" (UID: "45a26782-560c-4aa9-b5cc-301abaea98ee"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345539 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45a26782-560c-4aa9-b5cc-301abaea98ee-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345575 4842 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b9691e8b-e9db-41cf-b455-a3a9219b2d56-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345585 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345594 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jc9hp\" (UniqueName: \"kubernetes.io/projected/b9691e8b-e9db-41cf-b455-a3a9219b2d56-kube-api-access-jc9hp\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345605 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9691e8b-e9db-41cf-b455-a3a9219b2d56-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345613 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6mwg\" (UniqueName: \"kubernetes.io/projected/45a26782-560c-4aa9-b5cc-301abaea98ee-kube-api-access-r6mwg\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345622 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345629 4842 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/45a26782-560c-4aa9-b5cc-301abaea98ee-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345637 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45a26782-560c-4aa9-b5cc-301abaea98ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.345644 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b9691e8b-e9db-41cf-b455-a3a9219b2d56-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.489858 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dcgm2"] Nov 11 13:58:07 crc kubenswrapper[4842]: E1111 13:58:07.490315 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490338 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon" Nov 11 13:58:07 crc kubenswrapper[4842]: E1111 13:58:07.490352 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon-log" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490358 4842 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon-log" Nov 11 13:58:07 crc kubenswrapper[4842]: E1111 13:58:07.490377 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490387 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon" Nov 11 13:58:07 crc kubenswrapper[4842]: E1111 13:58:07.490406 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon-log" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490414 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon-log" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490651 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon-log" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490672 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490685 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" containerName="horizon" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.490701 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" containerName="horizon-log" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.493659 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.509475 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dcgm2"] Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.551618 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-catalog-content\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.551666 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4gh8\" (UniqueName: \"kubernetes.io/projected/7b55e038-2882-49b4-911c-7356d64c6352-kube-api-access-b4gh8\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.551750 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-utilities\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.652885 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-utilities\") pod \"redhat-marketplace-dcgm2\" (UID: 
\"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.653049 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-catalog-content\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.653073 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4gh8\" (UniqueName: \"kubernetes.io/projected/7b55e038-2882-49b4-911c-7356d64c6352-kube-api-access-b4gh8\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.653382 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-utilities\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.653509 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-catalog-content\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.671315 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4gh8\" (UniqueName: \"kubernetes.io/projected/7b55e038-2882-49b4-911c-7356d64c6352-kube-api-access-b4gh8\") pod \"redhat-marketplace-dcgm2\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.827555 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.864839 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4d859fc9-zzhzr" event={"ID":"45a26782-560c-4aa9-b5cc-301abaea98ee","Type":"ContainerDied","Data":"981935b9d9e72e03df96b39318818c2a1e3699c2df177aff5353aeb4381ddf0a"} Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.864895 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4d859fc9-zzhzr" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.864902 4842 scope.go:117] "RemoveContainer" containerID="b4ab738ee51ab22fb123e94ee08d898782bb15abe34cf3d94cf40c0fd32a4ff5" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.872158 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c86bb574c-djp9j" Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.877403 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c86bb574c-djp9j" event={"ID":"b9691e8b-e9db-41cf-b455-a3a9219b2d56","Type":"ContainerDied","Data":"69a290d62c4c86d4749dcb595969ff01b1729fe7aa47e80a4c461f14a7c34ae2"} Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.898041 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-94c6f6d9b-ns8g4" event={"ID":"2494da24-b74f-4317-8bf2-80e0335c5648","Type":"ContainerStarted","Data":"20f7042204a1455439659bfc129b4d985912760adb2ef87511b3deb9de759eca"} Nov 11 13:58:07 crc kubenswrapper[4842]: I1111 13:58:07.898215 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-94c6f6d9b-ns8g4" event={"ID":"2494da24-b74f-4317-8bf2-80e0335c5648","Type":"ContainerStarted","Data":"50fbb466a91da1ba5aac75fec7c858d9a2f722b39bfbb0140f3bfbf1bdde344d"} Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.039549 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f4d859fc9-zzhzr"] Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.055328 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7f4d859fc9-zzhzr"] Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.110425 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45a26782-560c-4aa9-b5cc-301abaea98ee" path="/var/lib/kubelet/pods/45a26782-560c-4aa9-b5cc-301abaea98ee/volumes" Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.111276 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c8ef126-60a0-4a3d-8d1f-8207a8fa684f" path="/var/lib/kubelet/pods/4c8ef126-60a0-4a3d-8d1f-8207a8fa684f/volumes" Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.111608 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c86bb574c-djp9j"] Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.111637 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6c86bb574c-djp9j"] Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.347910 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.372560 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.430569 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dcgm2"] Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.908805 4842 generic.go:334] "Generic (PLEG): container finished" podID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerID="a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9" exitCode=1 Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.908874 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerDied","Data":"a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9"} Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.909664 4842 scope.go:117] "RemoveContainer" containerID="a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9" Nov 11 13:58:08 crc kubenswrapper[4842]: E1111 13:58:08.909998 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(643d2817-dea0-4bc0-81b1-6f83eec1d4ca)\"" pod="openstack/watcher-decision-engine-0" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" Nov 11 13:58:08 crc kubenswrapper[4842]: I1111 13:58:08.916084 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 11 13:58:10 crc kubenswrapper[4842]: I1111 13:58:10.069614 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:58:10 crc kubenswrapper[4842]: E1111 13:58:10.070894 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:58:10 crc kubenswrapper[4842]: I1111 13:58:10.071772 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9691e8b-e9db-41cf-b455-a3a9219b2d56" path="/var/lib/kubelet/pods/b9691e8b-e9db-41cf-b455-a3a9219b2d56/volumes" Nov 11 13:58:11 crc kubenswrapper[4842]: I1111 13:58:11.950500 4842 generic.go:334] "Generic (PLEG): container finished" podID="ae52071f-3664-4aac-8657-3351df5c6fff" containerID="8765db9bdef24aa4b3ad1ae04cc6cb698429f237b11c40b9d8c1c1e343fa2f25" exitCode=0 Nov 11 13:58:11 crc kubenswrapper[4842]: I1111 13:58:11.950679 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7nz95" event={"ID":"ae52071f-3664-4aac-8657-3351df5c6fff","Type":"ContainerDied","Data":"8765db9bdef24aa4b3ad1ae04cc6cb698429f237b11c40b9d8c1c1e343fa2f25"} Nov 11 13:58:14 crc kubenswrapper[4842]: I1111 13:58:14.132733 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:14 crc kubenswrapper[4842]: I1111 13:58:14.133290 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:14 crc kubenswrapper[4842]: I1111 13:58:14.134603 4842 scope.go:117] "RemoveContainer" containerID="a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9" Nov 11 13:58:14 crc kubenswrapper[4842]: E1111 13:58:14.134853 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(643d2817-dea0-4bc0-81b1-6f83eec1d4ca)\"" pod="openstack/watcher-decision-engine-0" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.496376 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6dfb9d8bf8-tjb9d" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.154:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.154:8443: connect: connection refused" Nov 11 13:58:16 crc kubenswrapper[4842]: W1111 13:58:16.511917 4842 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b55e038_2882_49b4_911c_7356d64c6352.slice/crio-42ab64bfd42000be76c91108672213b57af0c7b568ec3f6264fc05c5d3ec146a WatchSource:0}: Error finding container 42ab64bfd42000be76c91108672213b57af0c7b568ec3f6264fc05c5d3ec146a: Status 404 returned error can't find the container with id 42ab64bfd42000be76c91108672213b57af0c7b568ec3f6264fc05c5d3ec146a Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.611718 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-7nz95" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.728220 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwxll\" (UniqueName: \"kubernetes.io/projected/ae52071f-3664-4aac-8657-3351df5c6fff-kube-api-access-dwxll\") pod \"ae52071f-3664-4aac-8657-3351df5c6fff\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.728596 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-combined-ca-bundle\") pod \"ae52071f-3664-4aac-8657-3351df5c6fff\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.728651 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-db-sync-config-data\") pod \"ae52071f-3664-4aac-8657-3351df5c6fff\" (UID: \"ae52071f-3664-4aac-8657-3351df5c6fff\") " Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.748665 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ae52071f-3664-4aac-8657-3351df5c6fff" (UID: "ae52071f-3664-4aac-8657-3351df5c6fff"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.748739 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae52071f-3664-4aac-8657-3351df5c6fff-kube-api-access-dwxll" (OuterVolumeSpecName: "kube-api-access-dwxll") pod "ae52071f-3664-4aac-8657-3351df5c6fff" (UID: "ae52071f-3664-4aac-8657-3351df5c6fff"). InnerVolumeSpecName "kube-api-access-dwxll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.764116 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae52071f-3664-4aac-8657-3351df5c6fff" (UID: "ae52071f-3664-4aac-8657-3351df5c6fff"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.830734 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.830769 4842 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ae52071f-3664-4aac-8657-3351df5c6fff-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.830799 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwxll\" (UniqueName: \"kubernetes.io/projected/ae52071f-3664-4aac-8657-3351df5c6fff-kube-api-access-dwxll\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.994528 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-7nz95" event={"ID":"ae52071f-3664-4aac-8657-3351df5c6fff","Type":"ContainerDied","Data":"d3d7be4e4600b9d246caa0278a8c7afe1e4351ffd5ab34590a07e820c8b30a53"} Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.994570 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-7nz95" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.994578 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3d7be4e4600b9d246caa0278a8c7afe1e4351ffd5ab34590a07e820c8b30a53" Nov 11 13:58:16 crc kubenswrapper[4842]: I1111 13:58:16.997003 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dcgm2" event={"ID":"7b55e038-2882-49b4-911c-7356d64c6352","Type":"ContainerStarted","Data":"42ab64bfd42000be76c91108672213b57af0c7b568ec3f6264fc05c5d3ec146a"} Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.831758 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-b56b7d577-bj4cr"] Nov 11 13:58:17 crc kubenswrapper[4842]: E1111 13:58:17.837700 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae52071f-3664-4aac-8657-3351df5c6fff" containerName="barbican-db-sync" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.837726 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae52071f-3664-4aac-8657-3351df5c6fff" containerName="barbican-db-sync" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.838001 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae52071f-3664-4aac-8657-3351df5c6fff" containerName="barbican-db-sync" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.839210 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.844769 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-b56b7d577-bj4cr"] Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.845020 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-f6j7v" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.847543 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.852730 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.925164 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5dfc9c458b-t66x8"] Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.927876 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.938443 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.939505 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5dfc9c458b-t66x8"] Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.955641 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-logs\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.955748 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-config-data-custom\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.955776 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-config-data\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.955811 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgh58\" (UniqueName: \"kubernetes.io/projected/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-kube-api-access-lgh58\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:17 crc kubenswrapper[4842]: I1111 13:58:17.955983 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-combined-ca-bundle\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " 
pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.055339 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f5bbfc8c7-rs8fv"] Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.057354 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058012 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-config-data-custom\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058086 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgh58\" (UniqueName: \"kubernetes.io/projected/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-kube-api-access-lgh58\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058661 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-config-data\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058774 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-combined-ca-bundle\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058813 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-combined-ca-bundle\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058845 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-logs\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058886 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58r8t\" (UniqueName: \"kubernetes.io/projected/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-kube-api-access-58r8t\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.058932 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-logs\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.059027 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-config-data\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.059050 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-config-data-custom\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.059621 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-logs\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.068332 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-config-data\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.068731 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-combined-ca-bundle\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.108787 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-config-data-custom\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.144488 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f5bbfc8c7-rs8fv"] Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162009 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-nb\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162127 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-config-data-custom\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc 
kubenswrapper[4842]: I1111 13:58:18.162191 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-sb\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162328 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-swift-storage-0\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162394 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-config-data\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162438 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-config\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162539 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-combined-ca-bundle\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162571 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xttqh\" (UniqueName: \"kubernetes.io/projected/27b63567-242a-4867-b59e-05bb176649d7-kube-api-access-xttqh\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162602 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-logs\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162643 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58r8t\" (UniqueName: \"kubernetes.io/projected/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-kube-api-access-58r8t\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.162861 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-svc\") pod 
\"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.165986 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-logs\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.167058 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-config-data-custom\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.169613 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-config-data\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.184928 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgh58\" (UniqueName: \"kubernetes.io/projected/c1fe7c75-ea0d-41ed-b79a-7ecce3779047-kube-api-access-lgh58\") pod \"barbican-worker-b56b7d577-bj4cr\" (UID: \"c1fe7c75-ea0d-41ed-b79a-7ecce3779047\") " pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.201295 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-combined-ca-bundle\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.236877 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58r8t\" (UniqueName: \"kubernetes.io/projected/e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef-kube-api-access-58r8t\") pod \"barbican-keystone-listener-5dfc9c458b-t66x8\" (UID: \"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef\") " pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.247763 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-8445dd8b74-6n6wt"] Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.249501 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.252185 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-8445dd8b74-6n6wt"] Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.253034 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.262116 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268370 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-swift-storage-0\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268528 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268561 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-config\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268640 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data-custom\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268679 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xttqh\" (UniqueName: \"kubernetes.io/projected/27b63567-242a-4867-b59e-05bb176649d7-kube-api-access-xttqh\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268739 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzr9b\" (UniqueName: \"kubernetes.io/projected/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-kube-api-access-hzr9b\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268803 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-svc\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268852 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-nb\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268942 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-logs\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: 
\"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.268977 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-sb\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.269058 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-combined-ca-bundle\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.272589 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-config\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.272914 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-nb\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.273124 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-swift-storage-0\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.273510 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-svc\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.273842 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-sb\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.306053 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xttqh\" (UniqueName: \"kubernetes.io/projected/27b63567-242a-4867-b59e-05bb176649d7-kube-api-access-xttqh\") pod \"dnsmasq-dns-f5bbfc8c7-rs8fv\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.371365 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 
13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.371441 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data-custom\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.371479 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzr9b\" (UniqueName: \"kubernetes.io/projected/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-kube-api-access-hzr9b\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.371552 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-logs\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.371624 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-combined-ca-bundle\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.372600 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-logs\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.377894 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.387711 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-combined-ca-bundle\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.387960 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data-custom\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.418170 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzr9b\" (UniqueName: \"kubernetes.io/projected/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-kube-api-access-hzr9b\") pod \"barbican-api-8445dd8b74-6n6wt\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.431141 4842 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.450860 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:18 crc kubenswrapper[4842]: I1111 13:58:18.476649 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-b56b7d577-bj4cr" Nov 11 13:58:19 crc kubenswrapper[4842]: I1111 13:58:19.596491 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7c6ccd957-kmlcp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.672756 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-95f8fc9b8-pc2pp"] Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.674627 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.676872 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.677148 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.687457 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-95f8fc9b8-pc2pp"] Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.764731 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gqxg\" (UniqueName: \"kubernetes.io/projected/b13026a5-f118-43d9-b363-84f9ae14379c-kube-api-access-6gqxg\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.764901 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13026a5-f118-43d9-b363-84f9ae14379c-logs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.764933 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-config-data-custom\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.764964 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-combined-ca-bundle\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.765188 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-internal-tls-certs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 
13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.765229 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-config-data\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.765350 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-public-tls-certs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.866998 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-internal-tls-certs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.867376 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-config-data\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.867435 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-public-tls-certs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.867478 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gqxg\" (UniqueName: \"kubernetes.io/projected/b13026a5-f118-43d9-b363-84f9ae14379c-kube-api-access-6gqxg\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.867940 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13026a5-f118-43d9-b363-84f9ae14379c-logs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.868371 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b13026a5-f118-43d9-b363-84f9ae14379c-logs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.869414 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-config-data-custom\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.869472 
4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-combined-ca-bundle\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.872693 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-internal-tls-certs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.875556 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-config-data-custom\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.879570 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-public-tls-certs\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.880051 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-config-data\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.891542 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b13026a5-f118-43d9-b363-84f9ae14379c-combined-ca-bundle\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.914717 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gqxg\" (UniqueName: \"kubernetes.io/projected/b13026a5-f118-43d9-b363-84f9ae14379c-kube-api-access-6gqxg\") pod \"barbican-api-95f8fc9b8-pc2pp\" (UID: \"b13026a5-f118-43d9-b363-84f9ae14379c\") " pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:21 crc kubenswrapper[4842]: I1111 13:58:21.998752 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:23 crc kubenswrapper[4842]: E1111 13:58:23.466512 4842 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24:latest" Nov 11 13:58:23 crc kubenswrapper[4842]: E1111 13:58:23.467044 4842 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24:latest,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xp976,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(d8908777-1ed6-42fa-8642-5c388d9f0b4e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 11 13:58:23 crc kubenswrapper[4842]: E1111 13:58:23.468677 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for 
\"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.629230 4842 scope.go:117] "RemoveContainer" containerID="3c7f7187d95201e58f7bc3338b45c3d5c3eb2547c4f721f2da0b227915a20661" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.737566 4842 scope.go:117] "RemoveContainer" containerID="b8fe7eae886c463be84f2e51259f1e8ee851c5acf575e0c0961ffff66aebacf5" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.872212 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.873603 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.873701 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.877552 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-8l8nd" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.877763 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.877874 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.931900 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/414a02e6-eebe-4988-99fd-1bf1651fa858-openstack-config-secret\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.932267 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnh6j\" (UniqueName: \"kubernetes.io/projected/414a02e6-eebe-4988-99fd-1bf1651fa858-kube-api-access-dnh6j\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.932394 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/414a02e6-eebe-4988-99fd-1bf1651fa858-openstack-config\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:23 crc kubenswrapper[4842]: I1111 13:58:23.932599 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/414a02e6-eebe-4988-99fd-1bf1651fa858-combined-ca-bundle\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.040198 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/414a02e6-eebe-4988-99fd-1bf1651fa858-openstack-config-secret\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc 
kubenswrapper[4842]: I1111 13:58:24.040268 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnh6j\" (UniqueName: \"kubernetes.io/projected/414a02e6-eebe-4988-99fd-1bf1651fa858-kube-api-access-dnh6j\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.040298 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/414a02e6-eebe-4988-99fd-1bf1651fa858-openstack-config\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.040324 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/414a02e6-eebe-4988-99fd-1bf1651fa858-combined-ca-bundle\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.047516 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/414a02e6-eebe-4988-99fd-1bf1651fa858-openstack-config\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.061272 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/414a02e6-eebe-4988-99fd-1bf1651fa858-combined-ca-bundle\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.080750 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/414a02e6-eebe-4988-99fd-1bf1651fa858-openstack-config-secret\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.082906 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnh6j\" (UniqueName: \"kubernetes.io/projected/414a02e6-eebe-4988-99fd-1bf1651fa858-kube-api-access-dnh6j\") pod \"openstackclient\" (UID: \"414a02e6-eebe-4988-99fd-1bf1651fa858\") " pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.119883 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.135065 4842 scope.go:117] "RemoveContainer" containerID="a3350effa1849593c188722ef00592ac4f29bebef372ad1d28676bda87287c14" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.135305 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerName="ceilometer-notification-agent" containerID="cri-o://2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762" gracePeriod=30 Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.135531 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerName="sg-core" containerID="cri-o://2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133" gracePeriod=30 Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.251951 4842 scope.go:117] "RemoveContainer" containerID="42376c60ee02b8662c5a8b1696057b7eee0296cb4d5ee564c4df4b7d2cc6bce0" Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.797452 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-8445dd8b74-6n6wt"] Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.811840 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f5bbfc8c7-rs8fv"] Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.825929 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-95f8fc9b8-pc2pp"] Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.843113 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5dfc9c458b-t66x8"] Nov 11 13:58:24 crc kubenswrapper[4842]: W1111 13:58:24.872477 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb13026a5_f118_43d9_b363_84f9ae14379c.slice/crio-a4b325e4e9c409836b4850713966d638eb1c210c3a381e559ee45af05f30ce31 WatchSource:0}: Error finding container a4b325e4e9c409836b4850713966d638eb1c210c3a381e559ee45af05f30ce31: Status 404 returned error can't find the container with id a4b325e4e9c409836b4850713966d638eb1c210c3a381e559ee45af05f30ce31 Nov 11 13:58:24 crc kubenswrapper[4842]: I1111 13:58:24.991986 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-b56b7d577-bj4cr"] Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.001327 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.068660 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:58:25 crc kubenswrapper[4842]: E1111 13:58:25.068920 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.156210 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" 
event={"ID":"414a02e6-eebe-4988-99fd-1bf1651fa858","Type":"ContainerStarted","Data":"70f9f05421375fa5a494ca08d199e99fd9047acfa405c4e4e2f0ff6e41f18746"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.169667 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" event={"ID":"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef","Type":"ContainerStarted","Data":"27175a18d631775f2a8b8dcb49cfd1643434f1047b42fd99f7b15cd85d8257d2"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.172782 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-95f8fc9b8-pc2pp" event={"ID":"b13026a5-f118-43d9-b363-84f9ae14379c","Type":"ContainerStarted","Data":"a4b325e4e9c409836b4850713966d638eb1c210c3a381e559ee45af05f30ce31"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.176713 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" event={"ID":"27b63567-242a-4867-b59e-05bb176649d7","Type":"ContainerStarted","Data":"cd1e6c575081174a4146edd11aa6e6810788ba2f165f1e8d2174aae20ae17b39"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.183688 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-94c6f6d9b-ns8g4" event={"ID":"2494da24-b74f-4317-8bf2-80e0335c5648","Type":"ContainerStarted","Data":"39b2b3158e998eb08bf301287ea965a3e62863cf32de213289e14057e0d1942b"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.184125 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.184157 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.200239 4842 generic.go:334] "Generic (PLEG): container finished" podID="7b55e038-2882-49b4-911c-7356d64c6352" containerID="010fbc55d5433836d4c097969e7a97de56a2ab2d33ccad0a6224ba2716899a7d" exitCode=0 Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.200592 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dcgm2" event={"ID":"7b55e038-2882-49b4-911c-7356d64c6352","Type":"ContainerDied","Data":"010fbc55d5433836d4c097969e7a97de56a2ab2d33ccad0a6224ba2716899a7d"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.204798 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-94c6f6d9b-ns8g4" podStartSLOduration=20.204786699 podStartE2EDuration="20.204786699s" podCreationTimestamp="2025-11-11 13:58:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:25.203263771 +0000 UTC m=+1715.863553390" watchObservedRunningTime="2025-11-11 13:58:25.204786699 +0000 UTC m=+1715.865076318" Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.213972 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8445dd8b74-6n6wt" event={"ID":"425bf7e3-ef65-4d59-9e5c-ba44a8333b05","Type":"ContainerStarted","Data":"8a5f848067eac6a8beb00eafe53fb4373812ed385b5f8ff9a4cda137b0dbcfec"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.227165 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b56b7d577-bj4cr" event={"ID":"c1fe7c75-ea0d-41ed-b79a-7ecce3779047","Type":"ContainerStarted","Data":"d2d6b805f706d203fd89c5d06666468bcaa2d6a9bd4f032f2cec126aaa2e9de5"} Nov 
11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.236953 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerID="2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133" exitCode=2 Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.237041 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8908777-1ed6-42fa-8642-5c388d9f0b4e","Type":"ContainerDied","Data":"2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133"} Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.252877 4842 generic.go:334] "Generic (PLEG): container finished" podID="5ac50166-974c-400f-a954-a1c40581322c" containerID="f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f" exitCode=0 Nov 11 13:58:25 crc kubenswrapper[4842]: I1111 13:58:25.252930 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvgph" event={"ID":"5ac50166-974c-400f-a954-a1c40581322c","Type":"ContainerDied","Data":"f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.272044 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6xg8p" event={"ID":"c61f37c1-6c58-4ae1-a127-2238733058b4","Type":"ContainerStarted","Data":"9edeb81704cf367311040a89b729f5e4a6380120be9870cd114757adc14d1d96"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.273406 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-95f8fc9b8-pc2pp" event={"ID":"b13026a5-f118-43d9-b363-84f9ae14379c","Type":"ContainerStarted","Data":"aca0725e83f9748c320b3d3cdb6cbb6d500dab7ba01d5202734faf7dd3e5c32b"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.273428 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-95f8fc9b8-pc2pp" event={"ID":"b13026a5-f118-43d9-b363-84f9ae14379c","Type":"ContainerStarted","Data":"000f6ec3b2e504d04d2d35a7fcc669c41073d02525cbc7625e5ee8e9e220d0d9"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.274272 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.274297 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.278681 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r24nh" event={"ID":"69d5134b-7c5b-40d9-bcbd-a1bd368a358d","Type":"ContainerStarted","Data":"a6d2a9158406282f3f0c02371f9d1ccde982bf8e0fc880379df425ba46ee4246"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.281365 4842 generic.go:334] "Generic (PLEG): container finished" podID="27b63567-242a-4867-b59e-05bb176649d7" containerID="67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd" exitCode=0 Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.281458 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" event={"ID":"27b63567-242a-4867-b59e-05bb176649d7","Type":"ContainerDied","Data":"67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.295010 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvgph" 
event={"ID":"5ac50166-974c-400f-a954-a1c40581322c","Type":"ContainerStarted","Data":"3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.299356 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8445dd8b74-6n6wt" event={"ID":"425bf7e3-ef65-4d59-9e5c-ba44a8333b05","Type":"ContainerStarted","Data":"0a92764b51bcb59507221e90e4b63b42dc3f6e6141d523c937b99936f41ce903"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.299401 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8445dd8b74-6n6wt" event={"ID":"425bf7e3-ef65-4d59-9e5c-ba44a8333b05","Type":"ContainerStarted","Data":"1744fde37509b5c3a82b33ee05246a5dffe26de9f61efa360fad52731adb5354"} Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.337928 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-6xg8p" podStartSLOduration=14.724382604 podStartE2EDuration="40.337904609s" podCreationTimestamp="2025-11-11 13:57:46 +0000 UTC" firstStartedPulling="2025-11-11 13:57:58.092976361 +0000 UTC m=+1688.753265980" lastFinishedPulling="2025-11-11 13:58:23.706498366 +0000 UTC m=+1714.366787985" observedRunningTime="2025-11-11 13:58:26.290502734 +0000 UTC m=+1716.950792353" watchObservedRunningTime="2025-11-11 13:58:26.337904609 +0000 UTC m=+1716.998194238" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.339285 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-95f8fc9b8-pc2pp" podStartSLOduration=5.339270891 podStartE2EDuration="5.339270891s" podCreationTimestamp="2025-11-11 13:58:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:26.319424301 +0000 UTC m=+1716.979713930" watchObservedRunningTime="2025-11-11 13:58:26.339270891 +0000 UTC m=+1716.999560510" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.371503 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-r24nh" podStartSLOduration=14.737290585 podStartE2EDuration="1m3.371478065s" podCreationTimestamp="2025-11-11 13:57:23 +0000 UTC" firstStartedPulling="2025-11-11 13:57:35.527964814 +0000 UTC m=+1666.188254433" lastFinishedPulling="2025-11-11 13:58:24.162152294 +0000 UTC m=+1714.822441913" observedRunningTime="2025-11-11 13:58:26.347900646 +0000 UTC m=+1717.008190275" watchObservedRunningTime="2025-11-11 13:58:26.371478065 +0000 UTC m=+1717.031767684" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.403121 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qvgph" podStartSLOduration=3.455714275 podStartE2EDuration="23.403091418s" podCreationTimestamp="2025-11-11 13:58:03 +0000 UTC" firstStartedPulling="2025-11-11 13:58:05.799958714 +0000 UTC m=+1696.460248333" lastFinishedPulling="2025-11-11 13:58:25.747335857 +0000 UTC m=+1716.407625476" observedRunningTime="2025-11-11 13:58:26.389784475 +0000 UTC m=+1717.050074094" watchObservedRunningTime="2025-11-11 13:58:26.403091418 +0000 UTC m=+1717.063381037" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.416679 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-8445dd8b74-6n6wt" podStartSLOduration=8.416655739 podStartE2EDuration="8.416655739s" podCreationTimestamp="2025-11-11 13:58:18 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:26.413146918 +0000 UTC m=+1717.073436547" watchObservedRunningTime="2025-11-11 13:58:26.416655739 +0000 UTC m=+1717.076945358" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.496497 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6dfb9d8bf8-tjb9d" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.154:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.154:8443: connect: connection refused" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.496617 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:58:26 crc kubenswrapper[4842]: I1111 13:58:26.953451 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:27 crc kubenswrapper[4842]: I1111 13:58:27.339448 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dcgm2" event={"ID":"7b55e038-2882-49b4-911c-7356d64c6352","Type":"ContainerStarted","Data":"2695e87204886cbd497f927abb9cefaed2a3cd267891a441fa7f6c88d61fa9af"} Nov 11 13:58:27 crc kubenswrapper[4842]: I1111 13:58:27.345968 4842 generic.go:334] "Generic (PLEG): container finished" podID="a02fa3d1-7142-425d-b514-0a647bfda1ca" containerID="ef4daab76392c0a6aeee495a68dc96653c8e12008714269f34a383934f5655c6" exitCode=0 Nov 11 13:58:27 crc kubenswrapper[4842]: I1111 13:58:27.348177 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wnl6v" event={"ID":"a02fa3d1-7142-425d-b514-0a647bfda1ca","Type":"ContainerDied","Data":"ef4daab76392c0a6aeee495a68dc96653c8e12008714269f34a383934f5655c6"} Nov 11 13:58:27 crc kubenswrapper[4842]: I1111 13:58:27.348227 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:27 crc kubenswrapper[4842]: I1111 13:58:27.348243 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.361445 4842 generic.go:334] "Generic (PLEG): container finished" podID="7b55e038-2882-49b4-911c-7356d64c6352" containerID="2695e87204886cbd497f927abb9cefaed2a3cd267891a441fa7f6c88d61fa9af" exitCode=0 Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.361737 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dcgm2" event={"ID":"7b55e038-2882-49b4-911c-7356d64c6352","Type":"ContainerDied","Data":"2695e87204886cbd497f927abb9cefaed2a3cd267891a441fa7f6c88d61fa9af"} Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.913953 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.923912 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967618 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-run-httpd\") pod \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967705 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-scripts\") pod \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967746 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-config-data\") pod \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967783 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-log-httpd\") pod \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967847 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hw2ls\" (UniqueName: \"kubernetes.io/projected/a02fa3d1-7142-425d-b514-0a647bfda1ca-kube-api-access-hw2ls\") pod \"a02fa3d1-7142-425d-b514-0a647bfda1ca\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967896 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-sg-core-conf-yaml\") pod \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967942 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-combined-ca-bundle\") pod \"a02fa3d1-7142-425d-b514-0a647bfda1ca\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.967984 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-config\") pod \"a02fa3d1-7142-425d-b514-0a647bfda1ca\" (UID: \"a02fa3d1-7142-425d-b514-0a647bfda1ca\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.968028 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xp976\" (UniqueName: \"kubernetes.io/projected/d8908777-1ed6-42fa-8642-5c388d9f0b4e-kube-api-access-xp976\") pod \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\" (UID: \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.968072 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-combined-ca-bundle\") pod \"d8908777-1ed6-42fa-8642-5c388d9f0b4e\" (UID: 
\"d8908777-1ed6-42fa-8642-5c388d9f0b4e\") " Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.970956 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d8908777-1ed6-42fa-8642-5c388d9f0b4e" (UID: "d8908777-1ed6-42fa-8642-5c388d9f0b4e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.972481 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d8908777-1ed6-42fa-8642-5c388d9f0b4e" (UID: "d8908777-1ed6-42fa-8642-5c388d9f0b4e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.979422 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a02fa3d1-7142-425d-b514-0a647bfda1ca-kube-api-access-hw2ls" (OuterVolumeSpecName: "kube-api-access-hw2ls") pod "a02fa3d1-7142-425d-b514-0a647bfda1ca" (UID: "a02fa3d1-7142-425d-b514-0a647bfda1ca"). InnerVolumeSpecName "kube-api-access-hw2ls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.984584 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-scripts" (OuterVolumeSpecName: "scripts") pod "d8908777-1ed6-42fa-8642-5c388d9f0b4e" (UID: "d8908777-1ed6-42fa-8642-5c388d9f0b4e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:28 crc kubenswrapper[4842]: I1111 13:58:28.984638 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8908777-1ed6-42fa-8642-5c388d9f0b4e-kube-api-access-xp976" (OuterVolumeSpecName: "kube-api-access-xp976") pod "d8908777-1ed6-42fa-8642-5c388d9f0b4e" (UID: "d8908777-1ed6-42fa-8642-5c388d9f0b4e"). InnerVolumeSpecName "kube-api-access-xp976". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.049487 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d8908777-1ed6-42fa-8642-5c388d9f0b4e" (UID: "d8908777-1ed6-42fa-8642-5c388d9f0b4e"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.058744 4842 scope.go:117] "RemoveContainer" containerID="a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.069803 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xp976\" (UniqueName: \"kubernetes.io/projected/d8908777-1ed6-42fa-8642-5c388d9f0b4e-kube-api-access-xp976\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.069835 4842 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.069844 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.069853 4842 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d8908777-1ed6-42fa-8642-5c388d9f0b4e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.069861 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hw2ls\" (UniqueName: \"kubernetes.io/projected/a02fa3d1-7142-425d-b514-0a647bfda1ca-kube-api-access-hw2ls\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.069869 4842 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.072130 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-config-data" (OuterVolumeSpecName: "config-data") pod "d8908777-1ed6-42fa-8642-5c388d9f0b4e" (UID: "d8908777-1ed6-42fa-8642-5c388d9f0b4e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.080412 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a02fa3d1-7142-425d-b514-0a647bfda1ca" (UID: "a02fa3d1-7142-425d-b514-0a647bfda1ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.082773 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8908777-1ed6-42fa-8642-5c388d9f0b4e" (UID: "d8908777-1ed6-42fa-8642-5c388d9f0b4e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.111246 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-config" (OuterVolumeSpecName: "config") pod "a02fa3d1-7142-425d-b514-0a647bfda1ca" (UID: "a02fa3d1-7142-425d-b514-0a647bfda1ca"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.172441 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.172616 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8908777-1ed6-42fa-8642-5c388d9f0b4e-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.172640 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.172654 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a02fa3d1-7142-425d-b514-0a647bfda1ca-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.372471 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" event={"ID":"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef","Type":"ContainerStarted","Data":"12fb81f471f1c3268be8e91e13ffa33cb465c307c91d6084e730e4a00931d4aa"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.372509 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" event={"ID":"e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef","Type":"ContainerStarted","Data":"70fa32a420d1ed11f8c2c893d91dacf5022cd2c86052bb2b2be07603da1c7246"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.375640 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b56b7d577-bj4cr" event={"ID":"c1fe7c75-ea0d-41ed-b79a-7ecce3779047","Type":"ContainerStarted","Data":"71add55c05af4c65d0c4b668527f0babee0b0abdf5086e0be03336e4b92d3347"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.375690 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b56b7d577-bj4cr" event={"ID":"c1fe7c75-ea0d-41ed-b79a-7ecce3779047","Type":"ContainerStarted","Data":"6cd922cfd458bfc5b766de2ee13410bb60f99a20ee311c3c606e918359582492"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.378616 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerID="2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762" exitCode=0 Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.378671 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.378689 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8908777-1ed6-42fa-8642-5c388d9f0b4e","Type":"ContainerDied","Data":"2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.378719 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d8908777-1ed6-42fa-8642-5c388d9f0b4e","Type":"ContainerDied","Data":"8d6c0e29dc7f3ec01ce6f8279828863ea0e1eb73eb9a1f66a50a86d3b8b684d9"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.378740 4842 scope.go:117] "RemoveContainer" containerID="2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.385871 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" event={"ID":"27b63567-242a-4867-b59e-05bb176649d7","Type":"ContainerStarted","Data":"ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.386057 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.390558 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5dfc9c458b-t66x8" podStartSLOduration=8.796752695 podStartE2EDuration="12.390542686s" podCreationTimestamp="2025-11-11 13:58:17 +0000 UTC" firstStartedPulling="2025-11-11 13:58:24.874196912 +0000 UTC m=+1715.534486531" lastFinishedPulling="2025-11-11 13:58:28.467986903 +0000 UTC m=+1719.128276522" observedRunningTime="2025-11-11 13:58:29.387364745 +0000 UTC m=+1720.047654364" watchObservedRunningTime="2025-11-11 13:58:29.390542686 +0000 UTC m=+1720.050832305" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.391611 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dcgm2" event={"ID":"7b55e038-2882-49b4-911c-7356d64c6352","Type":"ContainerStarted","Data":"9fc9de4d1cfd1f4a5f15b40dace6ae6609c1826d1096a6006fd1fe679076a959"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.395774 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-wnl6v" event={"ID":"a02fa3d1-7142-425d-b514-0a647bfda1ca","Type":"ContainerDied","Data":"0e8d0c9ec5ac59d7ed676a1f4a466b3a0e1b11b49db50ecd123da49edee0cdc4"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.395821 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e8d0c9ec5ac59d7ed676a1f4a466b3a0e1b11b49db50ecd123da49edee0cdc4" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.395892 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-wnl6v" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.418863 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerStarted","Data":"ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d"} Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.429285 4842 scope.go:117] "RemoveContainer" containerID="2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.430016 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-b56b7d577-bj4cr" podStartSLOduration=8.987277185 podStartE2EDuration="12.430005299s" podCreationTimestamp="2025-11-11 13:58:17 +0000 UTC" firstStartedPulling="2025-11-11 13:58:25.031410434 +0000 UTC m=+1715.691700053" lastFinishedPulling="2025-11-11 13:58:28.474138558 +0000 UTC m=+1719.134428167" observedRunningTime="2025-11-11 13:58:29.418623187 +0000 UTC m=+1720.078912806" watchObservedRunningTime="2025-11-11 13:58:29.430005299 +0000 UTC m=+1720.090294918" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.463502 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" podStartSLOduration=11.463481812 podStartE2EDuration="11.463481812s" podCreationTimestamp="2025-11-11 13:58:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:29.440274054 +0000 UTC m=+1720.100563663" watchObservedRunningTime="2025-11-11 13:58:29.463481812 +0000 UTC m=+1720.123771431" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.538944 4842 scope.go:117] "RemoveContainer" containerID="2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133" Nov 11 13:58:29 crc kubenswrapper[4842]: E1111 13:58:29.539436 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133\": container with ID starting with 2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133 not found: ID does not exist" containerID="2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.539474 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133"} err="failed to get container status \"2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133\": rpc error: code = NotFound desc = could not find container \"2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133\": container with ID starting with 2b6d389f99aa248b9d12550267320141e21459276525ddd9e4de596dff61b133 not found: ID does not exist" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.539495 4842 scope.go:117] "RemoveContainer" containerID="2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762" Nov 11 13:58:29 crc kubenswrapper[4842]: E1111 13:58:29.539742 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762\": container with ID starting with 2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762 not found: ID does not 
exist" containerID="2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.539782 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762"} err="failed to get container status \"2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762\": rpc error: code = NotFound desc = could not find container \"2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762\": container with ID starting with 2450221da93ace8e2f1e7248a6466583d1396ccf280fcf6e5daba8d4968b9762 not found: ID does not exist" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.599041 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.637328 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.639476 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dcgm2" podStartSLOduration=18.953901116 podStartE2EDuration="22.639455099s" podCreationTimestamp="2025-11-11 13:58:07 +0000 UTC" firstStartedPulling="2025-11-11 13:58:25.207247808 +0000 UTC m=+1715.867537427" lastFinishedPulling="2025-11-11 13:58:28.892801791 +0000 UTC m=+1719.553091410" observedRunningTime="2025-11-11 13:58:29.547591482 +0000 UTC m=+1720.207881101" watchObservedRunningTime="2025-11-11 13:58:29.639455099 +0000 UTC m=+1720.299744718" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.673815 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:29 crc kubenswrapper[4842]: E1111 13:58:29.674315 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerName="sg-core" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.674332 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerName="sg-core" Nov 11 13:58:29 crc kubenswrapper[4842]: E1111 13:58:29.674355 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerName="ceilometer-notification-agent" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.674360 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerName="ceilometer-notification-agent" Nov 11 13:58:29 crc kubenswrapper[4842]: E1111 13:58:29.674383 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a02fa3d1-7142-425d-b514-0a647bfda1ca" containerName="neutron-db-sync" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.674399 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a02fa3d1-7142-425d-b514-0a647bfda1ca" containerName="neutron-db-sync" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.674581 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="a02fa3d1-7142-425d-b514-0a647bfda1ca" containerName="neutron-db-sync" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.674608 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" containerName="ceilometer-notification-agent" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.674615 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" 
containerName="sg-core" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.676305 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.679018 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.679357 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.693163 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-log-httpd\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.693321 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xstlg\" (UniqueName: \"kubernetes.io/projected/d1692951-e3c3-4c77-b192-53bb2a8956d0-kube-api-access-xstlg\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.693404 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.693434 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.693459 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-config-data\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.693497 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-run-httpd\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.693537 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-scripts\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.705334 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.736250 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f5bbfc8c7-rs8fv"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.767540 4842 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-5d9f8cf997-5qgd8"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.769608 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.787982 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d9f8cf997-5qgd8"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794420 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-nb\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794467 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794487 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794505 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-config-data\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794533 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-run-httpd\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794555 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-sb\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794577 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-scripts\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794618 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfcp9\" (UniqueName: \"kubernetes.io/projected/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-kube-api-access-bfcp9\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794638 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-svc\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794657 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-log-httpd\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794681 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-config\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794738 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xstlg\" (UniqueName: \"kubernetes.io/projected/d1692951-e3c3-4c77-b192-53bb2a8956d0-kube-api-access-xstlg\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.794770 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-swift-storage-0\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.795994 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-log-httpd\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.802720 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-run-httpd\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.814370 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-config-data\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.819117 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.845006 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xstlg\" (UniqueName: \"kubernetes.io/projected/d1692951-e3c3-4c77-b192-53bb2a8956d0-kube-api-access-xstlg\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 
13:58:29.848741 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-scripts\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.860402 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-74c9bc975b-pgfbr"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.862721 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " pod="openstack/ceilometer-0" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.863952 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.870226 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-74c9bc975b-pgfbr"] Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.873219 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.873439 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.873665 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rqm4m" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.875620 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916253 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfcp9\" (UniqueName: \"kubernetes.io/projected/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-kube-api-access-bfcp9\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916289 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-config\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916316 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-svc\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916333 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-combined-ca-bundle\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916356 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4bkp\" 
(UniqueName: \"kubernetes.io/projected/dbdf9201-7e9d-4b1d-a890-4780e817d589-kube-api-access-s4bkp\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916374 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-config\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916392 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-httpd-config\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916458 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-swift-storage-0\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916477 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-ovndb-tls-certs\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916513 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-nb\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.916551 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-sb\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.917205 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-svc\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.917985 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-config\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.919714 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.922544 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-swift-storage-0\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.926324 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-sb\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:29 crc kubenswrapper[4842]: I1111 13:58:29.940911 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfcp9\" (UniqueName: \"kubernetes.io/projected/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-kube-api-access-bfcp9\") pod \"dnsmasq-dns-5d9f8cf997-5qgd8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.018023 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-ovndb-tls-certs\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.018512 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-config\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.018545 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-combined-ca-bundle\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.018570 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4bkp\" (UniqueName: \"kubernetes.io/projected/dbdf9201-7e9d-4b1d-a890-4780e817d589-kube-api-access-s4bkp\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.018590 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-httpd-config\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.024789 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-ovndb-tls-certs\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " 
pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.024850 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-httpd-config\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.029314 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-config\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.033808 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.046751 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-combined-ca-bundle\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.049786 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4bkp\" (UniqueName: \"kubernetes.io/projected/dbdf9201-7e9d-4b1d-a890-4780e817d589-kube-api-access-s4bkp\") pod \"neutron-74c9bc975b-pgfbr\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.096245 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8908777-1ed6-42fa-8642-5c388d9f0b4e" path="/var/lib/kubelet/pods/d8908777-1ed6-42fa-8642-5c388d9f0b4e/volumes" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.096719 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.283811 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.623552 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:30 crc kubenswrapper[4842]: I1111 13:58:30.895358 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d9f8cf997-5qgd8"] Nov 11 13:58:31 crc kubenswrapper[4842]: I1111 13:58:31.503396 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" event={"ID":"22cbb08d-229b-4cb4-bc84-4d9d22127ee8","Type":"ContainerStarted","Data":"97b7851488cc4f30c931f383db72bd8218d550c4569782d8657375ff792d9efa"} Nov 11 13:58:31 crc kubenswrapper[4842]: I1111 13:58:31.503703 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" event={"ID":"22cbb08d-229b-4cb4-bc84-4d9d22127ee8","Type":"ContainerStarted","Data":"030c13ff1301519c3f573f5410aa11de76776ba41a640ac3b5ad0c51dc2da7a8"} Nov 11 13:58:31 crc kubenswrapper[4842]: I1111 13:58:31.525485 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" podUID="27b63567-242a-4867-b59e-05bb176649d7" containerName="dnsmasq-dns" containerID="cri-o://ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d" gracePeriod=10 Nov 11 13:58:31 crc kubenswrapper[4842]: I1111 13:58:31.525578 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerStarted","Data":"d2f0764b3625587d32220bb3b1f59c332487fca1204ed2d3b336d9726986741e"} Nov 11 13:58:31 crc kubenswrapper[4842]: I1111 13:58:31.629811 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-74c9bc975b-pgfbr"] Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.312963 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.400178 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6fb8686df5-bdtrx"] Nov 11 13:58:32 crc kubenswrapper[4842]: E1111 13:58:32.401032 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27b63567-242a-4867-b59e-05bb176649d7" containerName="init" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.401157 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="27b63567-242a-4867-b59e-05bb176649d7" containerName="init" Nov 11 13:58:32 crc kubenswrapper[4842]: E1111 13:58:32.401252 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27b63567-242a-4867-b59e-05bb176649d7" containerName="dnsmasq-dns" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.401331 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="27b63567-242a-4867-b59e-05bb176649d7" containerName="dnsmasq-dns" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.401679 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="27b63567-242a-4867-b59e-05bb176649d7" containerName="dnsmasq-dns" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.405172 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-config\") pod \"27b63567-242a-4867-b59e-05bb176649d7\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.405416 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xttqh\" (UniqueName: \"kubernetes.io/projected/27b63567-242a-4867-b59e-05bb176649d7-kube-api-access-xttqh\") pod \"27b63567-242a-4867-b59e-05bb176649d7\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.405469 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-sb\") pod \"27b63567-242a-4867-b59e-05bb176649d7\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.405494 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-swift-storage-0\") pod \"27b63567-242a-4867-b59e-05bb176649d7\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.405563 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-svc\") pod \"27b63567-242a-4867-b59e-05bb176649d7\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.405668 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-nb\") pod \"27b63567-242a-4867-b59e-05bb176649d7\" (UID: \"27b63567-242a-4867-b59e-05bb176649d7\") " Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.414521 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.421698 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.421895 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.422028 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6fb8686df5-bdtrx"] Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.422249 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.436375 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27b63567-242a-4867-b59e-05bb176649d7-kube-api-access-xttqh" (OuterVolumeSpecName: "kube-api-access-xttqh") pod "27b63567-242a-4867-b59e-05bb176649d7" (UID: "27b63567-242a-4867-b59e-05bb176649d7"). InnerVolumeSpecName "kube-api-access-xttqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509362 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-internal-tls-certs\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509671 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-combined-ca-bundle\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509693 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-public-tls-certs\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509712 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc5fx\" (UniqueName: \"kubernetes.io/projected/65b2ac4c-d60a-4926-a3b1-88018ce9c369-kube-api-access-xc5fx\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509751 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65b2ac4c-d60a-4926-a3b1-88018ce9c369-log-httpd\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509803 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-config-data\") pod 
\"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509818 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65b2ac4c-d60a-4926-a3b1-88018ce9c369-run-httpd\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509847 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/65b2ac4c-d60a-4926-a3b1-88018ce9c369-etc-swift\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.509905 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xttqh\" (UniqueName: \"kubernetes.io/projected/27b63567-242a-4867-b59e-05bb176649d7-kube-api-access-xttqh\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.545318 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "27b63567-242a-4867-b59e-05bb176649d7" (UID: "27b63567-242a-4867-b59e-05bb176649d7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.562109 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74c9bc975b-pgfbr" event={"ID":"dbdf9201-7e9d-4b1d-a890-4780e817d589","Type":"ContainerStarted","Data":"0b61c81c63b4f4d7d8c181c198a4701d2ac0bd48becea2ff040a3f2de345e58f"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.562146 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74c9bc975b-pgfbr" event={"ID":"dbdf9201-7e9d-4b1d-a890-4780e817d589","Type":"ContainerStarted","Data":"b35f9c9810ac691025f3a1759db198999c210d20e7751ef245669628f931bb8a"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.590435 4842 generic.go:334] "Generic (PLEG): container finished" podID="27b63567-242a-4867-b59e-05bb176649d7" containerID="ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d" exitCode=0 Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.590777 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" event={"ID":"27b63567-242a-4867-b59e-05bb176649d7","Type":"ContainerDied","Data":"ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.590886 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" event={"ID":"27b63567-242a-4867-b59e-05bb176649d7","Type":"ContainerDied","Data":"cd1e6c575081174a4146edd11aa6e6810788ba2f165f1e8d2174aae20ae17b39"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.592943 4842 scope.go:117] "RemoveContainer" containerID="ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.593048 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f5bbfc8c7-rs8fv" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611260 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-internal-tls-certs\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611307 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-combined-ca-bundle\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611346 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-public-tls-certs\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611369 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc5fx\" (UniqueName: \"kubernetes.io/projected/65b2ac4c-d60a-4926-a3b1-88018ce9c369-kube-api-access-xc5fx\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611426 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65b2ac4c-d60a-4926-a3b1-88018ce9c369-log-httpd\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611490 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-config-data\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611510 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65b2ac4c-d60a-4926-a3b1-88018ce9c369-run-httpd\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611543 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/65b2ac4c-d60a-4926-a3b1-88018ce9c369-etc-swift\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.611627 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.618656 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65b2ac4c-d60a-4926-a3b1-88018ce9c369-log-httpd\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.618900 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65b2ac4c-d60a-4926-a3b1-88018ce9c369-run-httpd\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.644914 4842 generic.go:334] "Generic (PLEG): container finished" podID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerID="97b7851488cc4f30c931f383db72bd8218d550c4569782d8657375ff792d9efa" exitCode=0 Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.644992 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" event={"ID":"22cbb08d-229b-4cb4-bc84-4d9d22127ee8","Type":"ContainerDied","Data":"97b7851488cc4f30c931f383db72bd8218d550c4569782d8657375ff792d9efa"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.651092 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/65b2ac4c-d60a-4926-a3b1-88018ce9c369-etc-swift\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.651693 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-internal-tls-certs\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.652854 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-combined-ca-bundle\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.652911 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-public-tls-certs\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.659733 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65b2ac4c-d60a-4926-a3b1-88018ce9c369-config-data\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.677821 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc5fx\" (UniqueName: \"kubernetes.io/projected/65b2ac4c-d60a-4926-a3b1-88018ce9c369-kube-api-access-xc5fx\") pod \"swift-proxy-6fb8686df5-bdtrx\" (UID: \"65b2ac4c-d60a-4926-a3b1-88018ce9c369\") " pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 
13:58:32.683035 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerStarted","Data":"c4a4b1d55d60f1941acf0b4dfbfdc80c4e0751601b8bfb1fe262e7fb682b15b5"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.683088 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerStarted","Data":"a93454a96d3547a4e19b78c80ceaca529844c18571f6831dc1ce96c19dcccd33"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.691916 4842 generic.go:334] "Generic (PLEG): container finished" podID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerID="e919adb8a44cb6ca0930a1f361a90dc108e936104f9720da3a7a49cd2d6bf57a" exitCode=137 Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.691950 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfb9d8bf8-tjb9d" event={"ID":"b3246e29-bb2d-46c3-8d7f-3dec2e353e41","Type":"ContainerDied","Data":"e919adb8a44cb6ca0930a1f361a90dc108e936104f9720da3a7a49cd2d6bf57a"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.691974 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfb9d8bf8-tjb9d" event={"ID":"b3246e29-bb2d-46c3-8d7f-3dec2e353e41","Type":"ContainerDied","Data":"e60d99a36a19aea64b06383a5052457cc757278a33a92f510a89fee64a8bbbf3"} Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.691986 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e60d99a36a19aea64b06383a5052457cc757278a33a92f510a89fee64a8bbbf3" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.735839 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "27b63567-242a-4867-b59e-05bb176649d7" (UID: "27b63567-242a-4867-b59e-05bb176649d7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.743040 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "27b63567-242a-4867-b59e-05bb176649d7" (UID: "27b63567-242a-4867-b59e-05bb176649d7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.782022 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-config" (OuterVolumeSpecName: "config") pod "27b63567-242a-4867-b59e-05bb176649d7" (UID: "27b63567-242a-4867-b59e-05bb176649d7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.799866 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "27b63567-242a-4867-b59e-05bb176649d7" (UID: "27b63567-242a-4867-b59e-05bb176649d7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.816544 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.816575 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.816586 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.816596 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27b63567-242a-4867-b59e-05bb176649d7-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.883463 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:58:32 crc kubenswrapper[4842]: I1111 13:58:32.907931 4842 scope.go:117] "RemoveContainer" containerID="67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.019883 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-logs\") pod \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.020314 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-tls-certs\") pod \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.020352 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-scripts\") pod \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.020377 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-config-data\") pod \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.020414 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jwf7\" (UniqueName: \"kubernetes.io/projected/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-kube-api-access-4jwf7\") pod \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.020447 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-combined-ca-bundle\") pod \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\" (UID: 
\"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.020504 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-secret-key\") pod \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\" (UID: \"b3246e29-bb2d-46c3-8d7f-3dec2e353e41\") " Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.021983 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-logs" (OuterVolumeSpecName: "logs") pod "b3246e29-bb2d-46c3-8d7f-3dec2e353e41" (UID: "b3246e29-bb2d-46c3-8d7f-3dec2e353e41"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.030886 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.037657 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-kube-api-access-4jwf7" (OuterVolumeSpecName: "kube-api-access-4jwf7") pod "b3246e29-bb2d-46c3-8d7f-3dec2e353e41" (UID: "b3246e29-bb2d-46c3-8d7f-3dec2e353e41"). InnerVolumeSpecName "kube-api-access-4jwf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.049559 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b3246e29-bb2d-46c3-8d7f-3dec2e353e41" (UID: "b3246e29-bb2d-46c3-8d7f-3dec2e353e41"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.082154 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f5bbfc8c7-rs8fv"] Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.094131 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f5bbfc8c7-rs8fv"] Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.097395 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-scripts" (OuterVolumeSpecName: "scripts") pod "b3246e29-bb2d-46c3-8d7f-3dec2e353e41" (UID: "b3246e29-bb2d-46c3-8d7f-3dec2e353e41"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.114002 4842 scope.go:117] "RemoveContainer" containerID="ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d" Nov 11 13:58:33 crc kubenswrapper[4842]: E1111 13:58:33.115459 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d\": container with ID starting with ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d not found: ID does not exist" containerID="ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.115502 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d"} err="failed to get container status \"ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d\": rpc error: code = NotFound desc = could not find container \"ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d\": container with ID starting with ba99f2a690c8e6d893ae2ae26cf40c6e01cb5e62396e77cff48fa1f50308db6d not found: ID does not exist" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.115524 4842 scope.go:117] "RemoveContainer" containerID="67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd" Nov 11 13:58:33 crc kubenswrapper[4842]: E1111 13:58:33.115717 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd\": container with ID starting with 67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd not found: ID does not exist" containerID="67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.115735 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd"} err="failed to get container status \"67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd\": rpc error: code = NotFound desc = could not find container \"67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd\": container with ID starting with 67c49b7c1cf466fde9913a0c0470c39ea5a4541dc8cf11c342c0cc67d4d763bd not found: ID does not exist" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.130372 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.130406 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.130416 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jwf7\" (UniqueName: \"kubernetes.io/projected/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-kube-api-access-4jwf7\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.130426 4842 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-secret-key\") 
on node \"crc\" DevicePath \"\"" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.191732 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3246e29-bb2d-46c3-8d7f-3dec2e353e41" (UID: "b3246e29-bb2d-46c3-8d7f-3dec2e353e41"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.200601 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-config-data" (OuterVolumeSpecName: "config-data") pod "b3246e29-bb2d-46c3-8d7f-3dec2e353e41" (UID: "b3246e29-bb2d-46c3-8d7f-3dec2e353e41"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.235549 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.235585 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.328704 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "b3246e29-bb2d-46c3-8d7f-3dec2e353e41" (UID: "b3246e29-bb2d-46c3-8d7f-3dec2e353e41"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.339056 4842 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3246e29-bb2d-46c3-8d7f-3dec2e353e41-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.439240 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6cbfd54f69-xg8r8"] Nov 11 13:58:33 crc kubenswrapper[4842]: E1111 13:58:33.439595 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.439611 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" Nov 11 13:58:33 crc kubenswrapper[4842]: E1111 13:58:33.439654 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon-log" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.439661 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon-log" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.439821 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon-log" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.439841 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" containerName="horizon" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.440779 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.443753 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.445754 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.452321 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6cbfd54f69-xg8r8"] Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.571003 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-config\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.571056 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-combined-ca-bundle\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.571204 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86tmg\" (UniqueName: \"kubernetes.io/projected/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-kube-api-access-86tmg\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc 
kubenswrapper[4842]: I1111 13:58:33.571243 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-public-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.571278 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-internal-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.571319 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-ovndb-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.571363 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-httpd-config\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.642403 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.642495 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.688664 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-combined-ca-bundle\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.688723 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86tmg\" (UniqueName: \"kubernetes.io/projected/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-kube-api-access-86tmg\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.688767 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-public-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.688809 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-internal-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 
13:58:33.688858 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-ovndb-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.688907 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-httpd-config\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.688968 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-config\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.697016 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-internal-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.710531 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-combined-ca-bundle\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.716794 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-public-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.722642 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-ovndb-tls-certs\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.725718 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-config\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.751889 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-httpd-config\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.785787 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.791423 4842 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74c9bc975b-pgfbr" event={"ID":"dbdf9201-7e9d-4b1d-a890-4780e817d589","Type":"ContainerStarted","Data":"ebacd2dbfde027adcc40b38c5a299f7647578ccbc145b7c95148214acf782986"} Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.792376 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86tmg\" (UniqueName: \"kubernetes.io/projected/18c3a0a5-32fd-44f4-8d0e-beb556aab16b-kube-api-access-86tmg\") pod \"neutron-6cbfd54f69-xg8r8\" (UID: \"18c3a0a5-32fd-44f4-8d0e-beb556aab16b\") " pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.792462 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.802317 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dfb9d8bf8-tjb9d" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.805995 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" event={"ID":"22cbb08d-229b-4cb4-bc84-4d9d22127ee8","Type":"ContainerStarted","Data":"4ddd1acfef2d5a921400f75f7739e8971fe70c77e8c38202290e522490071115"} Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.806034 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.893817 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.921546 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.951724 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-74c9bc975b-pgfbr" podStartSLOduration=4.951707642 podStartE2EDuration="4.951707642s" podCreationTimestamp="2025-11-11 13:58:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:33.945461974 +0000 UTC m=+1724.605751593" watchObservedRunningTime="2025-11-11 13:58:33.951707642 +0000 UTC m=+1724.611997261" Nov 11 13:58:33 crc kubenswrapper[4842]: I1111 13:58:33.997725 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" podStartSLOduration=4.997704133 podStartE2EDuration="4.997704133s" podCreationTimestamp="2025-11-11 13:58:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:33.990310918 +0000 UTC m=+1724.650600547" watchObservedRunningTime="2025-11-11 13:58:33.997704133 +0000 UTC m=+1724.657993752" Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.030443 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6fb8686df5-bdtrx"] Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.132395 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27b63567-242a-4867-b59e-05bb176649d7" path="/var/lib/kubelet/pods/27b63567-242a-4867-b59e-05bb176649d7/volumes" Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.133850 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/horizon-6dfb9d8bf8-tjb9d"] Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.133898 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6dfb9d8bf8-tjb9d"] Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.134011 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.134068 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.228012 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.507580 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qvgph"] Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.759681 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6cbfd54f69-xg8r8"] Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.850061 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6fb8686df5-bdtrx" event={"ID":"65b2ac4c-d60a-4926-a3b1-88018ce9c369","Type":"ContainerStarted","Data":"8c03387560863ee88e10315be98d902d8ec6096c7355cc19db067f00fff11632"} Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.850185 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6fb8686df5-bdtrx" event={"ID":"65b2ac4c-d60a-4926-a3b1-88018ce9c369","Type":"ContainerStarted","Data":"07117fe6a26e6abbdc6e88107e361605e504dd8ce23c93425ee9e115122a6506"} Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.861472 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cbfd54f69-xg8r8" event={"ID":"18c3a0a5-32fd-44f4-8d0e-beb556aab16b","Type":"ContainerStarted","Data":"32cb1279b94c5d22b001e78474586b7201f420505facd2c865576bd24abcd407"} Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.871487 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerStarted","Data":"a5df5b6e14e5f193bcab24ea222344661b2ea3d21273ccc3f3c60900c4d4d843"} Nov 11 13:58:34 crc kubenswrapper[4842]: I1111 13:58:34.957136 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.718616 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.903310 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerStarted","Data":"83c523d5be85b717f8ebb030d6168aa737aac7579386c6666f55e3c93a536b5c"} Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.904478 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.929150 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6fb8686df5-bdtrx" event={"ID":"65b2ac4c-d60a-4926-a3b1-88018ce9c369","Type":"ContainerStarted","Data":"bba050e88c750c4e300229debe7c191579cb905e79107922bf65f91a9ce47ed8"} Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.929262 4842 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.929277 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.938038 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qvgph" podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="registry-server" containerID="cri-o://3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c" gracePeriod=2 Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.938966 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cbfd54f69-xg8r8" event={"ID":"18c3a0a5-32fd-44f4-8d0e-beb556aab16b","Type":"ContainerStarted","Data":"1c04769dd09bcfa46a70e3aea8afed3d5430344d2d9e7dada2023eb3842b87b9"} Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.938992 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cbfd54f69-xg8r8" event={"ID":"18c3a0a5-32fd-44f4-8d0e-beb556aab16b","Type":"ContainerStarted","Data":"149e4ffed9c85dbddf6522484534497c93f76135a511282ee90374236247b7cf"} Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.939005 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.953599 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.093202359 podStartE2EDuration="6.953581756s" podCreationTimestamp="2025-11-11 13:58:29 +0000 UTC" firstStartedPulling="2025-11-11 13:58:30.642157797 +0000 UTC m=+1721.302447416" lastFinishedPulling="2025-11-11 13:58:35.502537194 +0000 UTC m=+1726.162826813" observedRunningTime="2025-11-11 13:58:35.93576484 +0000 UTC m=+1726.596054459" watchObservedRunningTime="2025-11-11 13:58:35.953581756 +0000 UTC m=+1726.613871375" Nov 11 13:58:35 crc kubenswrapper[4842]: I1111 13:58:35.988947 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6fb8686df5-bdtrx" podStartSLOduration=3.988926978 podStartE2EDuration="3.988926978s" podCreationTimestamp="2025-11-11 13:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:35.967369434 +0000 UTC m=+1726.627659053" watchObservedRunningTime="2025-11-11 13:58:35.988926978 +0000 UTC m=+1726.649216607" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.001495 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6cbfd54f69-xg8r8" podStartSLOduration=3.001477007 podStartE2EDuration="3.001477007s" podCreationTimestamp="2025-11-11 13:58:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:35.987303867 +0000 UTC m=+1726.647593496" watchObservedRunningTime="2025-11-11 13:58:36.001477007 +0000 UTC m=+1726.661766626" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.048527 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-95f8fc9b8-pc2pp" podUID="b13026a5-f118-43d9-b363-84f9ae14379c" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.174:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" 
Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.102510 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3246e29-bb2d-46c3-8d7f-3dec2e353e41" path="/var/lib/kubelet/pods/b3246e29-bb2d-46c3-8d7f-3dec2e353e41/volumes" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.129982 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-95f8fc9b8-pc2pp" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.241669 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-8445dd8b74-6n6wt"] Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.241928 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" containerID="cri-o://0a92764b51bcb59507221e90e4b63b42dc3f6e6141d523c937b99936f41ce903" gracePeriod=30 Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.242123 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" containerID="cri-o://1744fde37509b5c3a82b33ee05246a5dffe26de9f61efa360fad52731adb5354" gracePeriod=30 Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.254804 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-94c6f6d9b-ns8g4" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.264523 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": EOF" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.264844 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": EOF" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.265206 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": EOF" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.265607 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": EOF" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.445421 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": EOF" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.445915 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": EOF" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.643461 4842 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.896245 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.982163 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-utilities\") pod \"5ac50166-974c-400f-a954-a1c40581322c\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.982257 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-catalog-content\") pod \"5ac50166-974c-400f-a954-a1c40581322c\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.982306 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwckl\" (UniqueName: \"kubernetes.io/projected/5ac50166-974c-400f-a954-a1c40581322c-kube-api-access-xwckl\") pod \"5ac50166-974c-400f-a954-a1c40581322c\" (UID: \"5ac50166-974c-400f-a954-a1c40581322c\") " Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.988615 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-utilities" (OuterVolumeSpecName: "utilities") pod "5ac50166-974c-400f-a954-a1c40581322c" (UID: "5ac50166-974c-400f-a954-a1c40581322c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.989405 4842 generic.go:334] "Generic (PLEG): container finished" podID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerID="0a92764b51bcb59507221e90e4b63b42dc3f6e6141d523c937b99936f41ce903" exitCode=143 Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.989506 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8445dd8b74-6n6wt" event={"ID":"425bf7e3-ef65-4d59-9e5c-ba44a8333b05","Type":"ContainerDied","Data":"0a92764b51bcb59507221e90e4b63b42dc3f6e6141d523c937b99936f41ce903"} Nov 11 13:58:36 crc kubenswrapper[4842]: I1111 13:58:36.994352 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ac50166-974c-400f-a954-a1c40581322c-kube-api-access-xwckl" (OuterVolumeSpecName: "kube-api-access-xwckl") pod "5ac50166-974c-400f-a954-a1c40581322c" (UID: "5ac50166-974c-400f-a954-a1c40581322c"). InnerVolumeSpecName "kube-api-access-xwckl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.010175 4842 generic.go:334] "Generic (PLEG): container finished" podID="5ac50166-974c-400f-a954-a1c40581322c" containerID="3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c" exitCode=0 Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.012464 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvgph" event={"ID":"5ac50166-974c-400f-a954-a1c40581322c","Type":"ContainerDied","Data":"3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c"} Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.012536 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qvgph" event={"ID":"5ac50166-974c-400f-a954-a1c40581322c","Type":"ContainerDied","Data":"8d5bee87097ff23a11e31cb3bac403803fa2095ee6e0b66518778da6f9d96aa5"} Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.012574 4842 scope.go:117] "RemoveContainer" containerID="3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.013045 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qvgph" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.089382 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwckl\" (UniqueName: \"kubernetes.io/projected/5ac50166-974c-400f-a954-a1c40581322c-kube-api-access-xwckl\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.089429 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.113315 4842 scope.go:117] "RemoveContainer" containerID="f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.205380 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ac50166-974c-400f-a954-a1c40581322c" (UID: "5ac50166-974c-400f-a954-a1c40581322c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.293280 4842 scope.go:117] "RemoveContainer" containerID="4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.296885 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ac50166-974c-400f-a954-a1c40581322c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.373466 4842 scope.go:117] "RemoveContainer" containerID="3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c" Nov 11 13:58:37 crc kubenswrapper[4842]: E1111 13:58:37.375237 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c\": container with ID starting with 3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c not found: ID does not exist" containerID="3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.375279 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c"} err="failed to get container status \"3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c\": rpc error: code = NotFound desc = could not find container \"3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c\": container with ID starting with 3564f92157689948e18acf20c2c0538cd3df8b2ac0085e7600698ac50ef1611c not found: ID does not exist" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.375305 4842 scope.go:117] "RemoveContainer" containerID="f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f" Nov 11 13:58:37 crc kubenswrapper[4842]: E1111 13:58:37.375645 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f\": container with ID starting with f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f not found: ID does not exist" containerID="f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.375671 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f"} err="failed to get container status \"f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f\": rpc error: code = NotFound desc = could not find container \"f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f\": container with ID starting with f105cf608515d719fb1fafcef35e5e88241901a73394ee2fafa77ad47424f91f not found: ID does not exist" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.375683 4842 scope.go:117] "RemoveContainer" containerID="4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543" Nov 11 13:58:37 crc kubenswrapper[4842]: E1111 13:58:37.376229 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543\": container with ID starting with 4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543 not found: ID does not exist" 
containerID="4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.376253 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543"} err="failed to get container status \"4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543\": rpc error: code = NotFound desc = could not find container \"4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543\": container with ID starting with 4737701a1296b47085802a31cf5c0b35f70a4231100e85f41ce034c2017b3543 not found: ID does not exist" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.398740 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qvgph"] Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.427533 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qvgph"] Nov 11 13:58:37 crc kubenswrapper[4842]: E1111 13:58:37.543440 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ac50166_974c_400f_a954_a1c40581322c.slice/crio-8d5bee87097ff23a11e31cb3bac403803fa2095ee6e0b66518778da6f9d96aa5\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ac50166_974c_400f_a954_a1c40581322c.slice\": RecentStats: unable to find data in memory cache]" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.828136 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:37 crc kubenswrapper[4842]: I1111 13:58:37.828172 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:38 crc kubenswrapper[4842]: I1111 13:58:38.019453 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-central-agent" containerID="cri-o://a93454a96d3547a4e19b78c80ceaca529844c18571f6831dc1ce96c19dcccd33" gracePeriod=30 Nov 11 13:58:38 crc kubenswrapper[4842]: I1111 13:58:38.019527 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-notification-agent" containerID="cri-o://c4a4b1d55d60f1941acf0b4dfbfdc80c4e0751601b8bfb1fe262e7fb682b15b5" gracePeriod=30 Nov 11 13:58:38 crc kubenswrapper[4842]: I1111 13:58:38.019560 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="sg-core" containerID="cri-o://a5df5b6e14e5f193bcab24ea222344661b2ea3d21273ccc3f3c60900c4d4d843" gracePeriod=30 Nov 11 13:58:38 crc kubenswrapper[4842]: I1111 13:58:38.019539 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="proxy-httpd" containerID="cri-o://83c523d5be85b717f8ebb030d6168aa737aac7579386c6666f55e3c93a536b5c" gracePeriod=30 Nov 11 13:58:38 crc kubenswrapper[4842]: I1111 13:58:38.069540 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ac50166-974c-400f-a954-a1c40581322c" 
path="/var/lib/kubelet/pods/5ac50166-974c-400f-a954-a1c40581322c/volumes" Nov 11 13:58:38 crc kubenswrapper[4842]: I1111 13:58:38.909309 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-dcgm2" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="registry-server" probeResult="failure" output=< Nov 11 13:58:38 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 13:58:38 crc kubenswrapper[4842]: > Nov 11 13:58:39 crc kubenswrapper[4842]: I1111 13:58:39.033062 4842 generic.go:334] "Generic (PLEG): container finished" podID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerID="83c523d5be85b717f8ebb030d6168aa737aac7579386c6666f55e3c93a536b5c" exitCode=0 Nov 11 13:58:39 crc kubenswrapper[4842]: I1111 13:58:39.033114 4842 generic.go:334] "Generic (PLEG): container finished" podID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerID="a5df5b6e14e5f193bcab24ea222344661b2ea3d21273ccc3f3c60900c4d4d843" exitCode=2 Nov 11 13:58:39 crc kubenswrapper[4842]: I1111 13:58:39.033125 4842 generic.go:334] "Generic (PLEG): container finished" podID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerID="c4a4b1d55d60f1941acf0b4dfbfdc80c4e0751601b8bfb1fe262e7fb682b15b5" exitCode=0 Nov 11 13:58:39 crc kubenswrapper[4842]: I1111 13:58:39.033147 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerDied","Data":"83c523d5be85b717f8ebb030d6168aa737aac7579386c6666f55e3c93a536b5c"} Nov 11 13:58:39 crc kubenswrapper[4842]: I1111 13:58:39.033175 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerDied","Data":"a5df5b6e14e5f193bcab24ea222344661b2ea3d21273ccc3f3c60900c4d4d843"} Nov 11 13:58:39 crc kubenswrapper[4842]: I1111 13:58:39.033186 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerDied","Data":"c4a4b1d55d60f1941acf0b4dfbfdc80c4e0751601b8bfb1fe262e7fb682b15b5"} Nov 11 13:58:40 crc kubenswrapper[4842]: I1111 13:58:40.051728 4842 generic.go:334] "Generic (PLEG): container finished" podID="69d5134b-7c5b-40d9-bcbd-a1bd368a358d" containerID="a6d2a9158406282f3f0c02371f9d1ccde982bf8e0fc880379df425ba46ee4246" exitCode=0 Nov 11 13:58:40 crc kubenswrapper[4842]: I1111 13:58:40.051772 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r24nh" event={"ID":"69d5134b-7c5b-40d9-bcbd-a1bd368a358d","Type":"ContainerDied","Data":"a6d2a9158406282f3f0c02371f9d1ccde982bf8e0fc880379df425ba46ee4246"} Nov 11 13:58:40 crc kubenswrapper[4842]: I1111 13:58:40.059588 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:58:40 crc kubenswrapper[4842]: E1111 13:58:40.059790 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:58:40 crc kubenswrapper[4842]: I1111 13:58:40.098295 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:58:40 crc kubenswrapper[4842]: I1111 13:58:40.201879 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85d7795cdc-rprf5"] Nov 11 13:58:40 crc kubenswrapper[4842]: I1111 13:58:40.202136 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerName="dnsmasq-dns" containerID="cri-o://486feb2a68cc8d9e4e864555f55fd3c9a846bb6700c5860e4f0c98309535e8e1" gracePeriod=10 Nov 11 13:58:41 crc kubenswrapper[4842]: I1111 13:58:41.069708 4842 generic.go:334] "Generic (PLEG): container finished" podID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerID="a93454a96d3547a4e19b78c80ceaca529844c18571f6831dc1ce96c19dcccd33" exitCode=0 Nov 11 13:58:41 crc kubenswrapper[4842]: I1111 13:58:41.069796 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerDied","Data":"a93454a96d3547a4e19b78c80ceaca529844c18571f6831dc1ce96c19dcccd33"} Nov 11 13:58:41 crc kubenswrapper[4842]: I1111 13:58:41.079939 4842 generic.go:334] "Generic (PLEG): container finished" podID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerID="486feb2a68cc8d9e4e864555f55fd3c9a846bb6700c5860e4f0c98309535e8e1" exitCode=0 Nov 11 13:58:41 crc kubenswrapper[4842]: I1111 13:58:41.080031 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" event={"ID":"d47bc4a2-0636-4a23-b904-4ca118e84e05","Type":"ContainerDied","Data":"486feb2a68cc8d9e4e864555f55fd3c9a846bb6700c5860e4f0c98309535e8e1"} Nov 11 13:58:42 crc kubenswrapper[4842]: I1111 13:58:42.150681 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": read tcp 10.217.0.2:53770->10.217.0.173:9311: read: connection reset by peer" Nov 11 13:58:42 crc kubenswrapper[4842]: I1111 13:58:42.152032 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": read tcp 10.217.0.2:53754->10.217.0.173:9311: read: connection reset by peer" Nov 11 13:58:42 crc kubenswrapper[4842]: I1111 13:58:42.644431 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: connect: connection refused" Nov 11 13:58:43 crc kubenswrapper[4842]: I1111 13:58:43.037668 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:43 crc kubenswrapper[4842]: I1111 13:58:43.039952 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6fb8686df5-bdtrx" Nov 11 13:58:43 crc kubenswrapper[4842]: I1111 13:58:43.136452 4842 generic.go:334] "Generic (PLEG): container finished" podID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerID="1744fde37509b5c3a82b33ee05246a5dffe26de9f61efa360fad52731adb5354" exitCode=0 Nov 11 13:58:43 crc kubenswrapper[4842]: I1111 13:58:43.136559 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-8445dd8b74-6n6wt" event={"ID":"425bf7e3-ef65-4d59-9e5c-ba44a8333b05","Type":"ContainerDied","Data":"1744fde37509b5c3a82b33ee05246a5dffe26de9f61efa360fad52731adb5354"} Nov 11 13:58:43 crc kubenswrapper[4842]: I1111 13:58:43.452065 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": dial tcp 10.217.0.173:9311: connect: connection refused" Nov 11 13:58:43 crc kubenswrapper[4842]: I1111 13:58:43.452084 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-8445dd8b74-6n6wt" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.173:9311/healthcheck\": dial tcp 10.217.0.173:9311: connect: connection refused" Nov 11 13:58:44 crc kubenswrapper[4842]: I1111 13:58:44.155325 4842 generic.go:334] "Generic (PLEG): container finished" podID="c61f37c1-6c58-4ae1-a127-2238733058b4" containerID="9edeb81704cf367311040a89b729f5e4a6380120be9870cd114757adc14d1d96" exitCode=0 Nov 11 13:58:44 crc kubenswrapper[4842]: I1111 13:58:44.155502 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6xg8p" event={"ID":"c61f37c1-6c58-4ae1-a127-2238733058b4","Type":"ContainerDied","Data":"9edeb81704cf367311040a89b729f5e4a6380120be9870cd114757adc14d1d96"} Nov 11 13:58:45 crc kubenswrapper[4842]: I1111 13:58:45.933623 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-r24nh" Nov 11 13:58:45 crc kubenswrapper[4842]: I1111 13:58:45.947676 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-6xg8p" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.017569 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032112 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-dvshs"] Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032574 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="proxy-httpd" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032593 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="proxy-httpd" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032611 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="extract-utilities" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032618 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="extract-utilities" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032629 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-notification-agent" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032635 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-notification-agent" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032646 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c61f37c1-6c58-4ae1-a127-2238733058b4" containerName="glance-db-sync" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032652 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c61f37c1-6c58-4ae1-a127-2238733058b4" containerName="glance-db-sync" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032662 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="sg-core" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032669 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="sg-core" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032680 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="extract-content" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032686 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="extract-content" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032693 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-central-agent" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032699 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-central-agent" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032708 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69d5134b-7c5b-40d9-bcbd-a1bd368a358d" containerName="cinder-db-sync" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032715 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="69d5134b-7c5b-40d9-bcbd-a1bd368a358d" containerName="cinder-db-sync" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.032725 4842 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="registry-server" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032731 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="registry-server" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032934 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="sg-core" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032948 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c61f37c1-6c58-4ae1-a127-2238733058b4" containerName="glance-db-sync" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032956 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="69d5134b-7c5b-40d9-bcbd-a1bd368a358d" containerName="cinder-db-sync" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032964 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-notification-agent" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032977 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac50166-974c-400f-a954-a1c40581322c" containerName="registry-server" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032987 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="proxy-httpd" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.032997 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" containerName="ceilometer-central-agent" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.033681 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-dvshs" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.075468 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-db-sync-config-data\") pod \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.075514 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-combined-ca-bundle\") pod \"c61f37c1-6c58-4ae1-a127-2238733058b4\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.075552 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vm9cm\" (UniqueName: \"kubernetes.io/projected/c61f37c1-6c58-4ae1-a127-2238733058b4-kube-api-access-vm9cm\") pod \"c61f37c1-6c58-4ae1-a127-2238733058b4\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.077144 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ph7j\" (UniqueName: \"kubernetes.io/projected/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-kube-api-access-2ph7j\") pod \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.077173 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-config-data\") pod \"c61f37c1-6c58-4ae1-a127-2238733058b4\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.077224 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-config-data\") pod \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.077266 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-scripts\") pod \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.077579 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-etc-machine-id\") pod \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.077625 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-db-sync-config-data\") pod \"c61f37c1-6c58-4ae1-a127-2238733058b4\" (UID: \"c61f37c1-6c58-4ae1-a127-2238733058b4\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.079568 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-combined-ca-bundle\") pod 
\"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\" (UID: \"69d5134b-7c5b-40d9-bcbd-a1bd368a358d\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.084945 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "69d5134b-7c5b-40d9-bcbd-a1bd368a358d" (UID: "69d5134b-7c5b-40d9-bcbd-a1bd368a358d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.124148 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c61f37c1-6c58-4ae1-a127-2238733058b4-kube-api-access-vm9cm" (OuterVolumeSpecName: "kube-api-access-vm9cm") pod "c61f37c1-6c58-4ae1-a127-2238733058b4" (UID: "c61f37c1-6c58-4ae1-a127-2238733058b4"). InnerVolumeSpecName "kube-api-access-vm9cm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.132344 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-dvshs"] Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.136742 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "69d5134b-7c5b-40d9-bcbd-a1bd368a358d" (UID: "69d5134b-7c5b-40d9-bcbd-a1bd368a358d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.140487 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c61f37c1-6c58-4ae1-a127-2238733058b4" (UID: "c61f37c1-6c58-4ae1-a127-2238733058b4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.140895 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-kube-api-access-2ph7j" (OuterVolumeSpecName: "kube-api-access-2ph7j") pod "69d5134b-7c5b-40d9-bcbd-a1bd368a358d" (UID: "69d5134b-7c5b-40d9-bcbd-a1bd368a358d"). InnerVolumeSpecName "kube-api-access-2ph7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.148755 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-scripts" (OuterVolumeSpecName: "scripts") pod "69d5134b-7c5b-40d9-bcbd-a1bd368a358d" (UID: "69d5134b-7c5b-40d9-bcbd-a1bd368a358d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.180115 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d1692951-e3c3-4c77-b192-53bb2a8956d0","Type":"ContainerDied","Data":"d2f0764b3625587d32220bb3b1f59c332487fca1204ed2d3b336d9726986741e"} Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.180168 4842 scope.go:117] "RemoveContainer" containerID="83c523d5be85b717f8ebb030d6168aa737aac7579386c6666f55e3c93a536b5c" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.180282 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.181598 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-combined-ca-bundle\") pod \"d1692951-e3c3-4c77-b192-53bb2a8956d0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.181653 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-sg-core-conf-yaml\") pod \"d1692951-e3c3-4c77-b192-53bb2a8956d0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.181682 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xstlg\" (UniqueName: \"kubernetes.io/projected/d1692951-e3c3-4c77-b192-53bb2a8956d0-kube-api-access-xstlg\") pod \"d1692951-e3c3-4c77-b192-53bb2a8956d0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.181716 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-config-data\") pod \"d1692951-e3c3-4c77-b192-53bb2a8956d0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.182962 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-run-httpd\") pod \"d1692951-e3c3-4c77-b192-53bb2a8956d0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183022 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-scripts\") pod \"d1692951-e3c3-4c77-b192-53bb2a8956d0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183193 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-log-httpd\") pod \"d1692951-e3c3-4c77-b192-53bb2a8956d0\" (UID: \"d1692951-e3c3-4c77-b192-53bb2a8956d0\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183459 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7v88\" (UniqueName: \"kubernetes.io/projected/fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365-kube-api-access-g7v88\") pod \"nova-api-db-create-dvshs\" (UID: \"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365\") " pod="openstack/nova-api-db-create-dvshs" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183756 4842 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183772 4842 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183784 4842 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vm9cm\" (UniqueName: \"kubernetes.io/projected/c61f37c1-6c58-4ae1-a127-2238733058b4-kube-api-access-vm9cm\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183800 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ph7j\" (UniqueName: \"kubernetes.io/projected/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-kube-api-access-2ph7j\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183811 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.183822 4842 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.187324 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d1692951-e3c3-4c77-b192-53bb2a8956d0" (UID: "d1692951-e3c3-4c77-b192-53bb2a8956d0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.187585 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6xg8p" event={"ID":"c61f37c1-6c58-4ae1-a127-2238733058b4","Type":"ContainerDied","Data":"6f0ae3796c9f78836a11f19c2009384e8cc7e4d3d083c5c613612346aa99306e"} Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.187611 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f0ae3796c9f78836a11f19c2009384e8cc7e4d3d083c5c613612346aa99306e" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.187668 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-6xg8p" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.189886 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d1692951-e3c3-4c77-b192-53bb2a8956d0" (UID: "d1692951-e3c3-4c77-b192-53bb2a8956d0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.199516 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r24nh" event={"ID":"69d5134b-7c5b-40d9-bcbd-a1bd368a358d","Type":"ContainerDied","Data":"038a90b070bd062612f2ea3c95c944e00423d90341982b1fa7a242511d7e2a3b"} Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.199559 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="038a90b070bd062612f2ea3c95c944e00423d90341982b1fa7a242511d7e2a3b" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.199616 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-r24nh" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.203697 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-lqchx"] Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.205440 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-lqchx" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.206272 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1692951-e3c3-4c77-b192-53bb2a8956d0-kube-api-access-xstlg" (OuterVolumeSpecName: "kube-api-access-xstlg") pod "d1692951-e3c3-4c77-b192-53bb2a8956d0" (UID: "d1692951-e3c3-4c77-b192-53bb2a8956d0"). InnerVolumeSpecName "kube-api-access-xstlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.210572 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-scripts" (OuterVolumeSpecName: "scripts") pod "d1692951-e3c3-4c77-b192-53bb2a8956d0" (UID: "d1692951-e3c3-4c77-b192-53bb2a8956d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.211272 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-lqchx"] Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.212353 4842 scope.go:117] "RemoveContainer" containerID="a5df5b6e14e5f193bcab24ea222344661b2ea3d21273ccc3f3c60900c4d4d843" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.238674 4842 scope.go:117] "RemoveContainer" containerID="c4a4b1d55d60f1941acf0b4dfbfdc80c4e0751601b8bfb1fe262e7fb682b15b5" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.243293 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.255925 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.280402 4842 scope.go:117] "RemoveContainer" containerID="a93454a96d3547a4e19b78c80ceaca529844c18571f6831dc1ce96c19dcccd33" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.285183 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7v88\" (UniqueName: \"kubernetes.io/projected/fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365-kube-api-access-g7v88\") pod \"nova-api-db-create-dvshs\" (UID: \"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365\") " pod="openstack/nova-api-db-create-dvshs" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.285365 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jdlq\" (UniqueName: \"kubernetes.io/projected/5c9219a3-683a-4aec-b1a5-017efe925201-kube-api-access-5jdlq\") pod \"nova-cell0-db-create-lqchx\" (UID: \"5c9219a3-683a-4aec-b1a5-017efe925201\") " pod="openstack/nova-cell0-db-create-lqchx" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.285417 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xstlg\" (UniqueName: \"kubernetes.io/projected/d1692951-e3c3-4c77-b192-53bb2a8956d0-kube-api-access-xstlg\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.285429 4842 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.285440 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.285448 4842 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1692951-e3c3-4c77-b192-53bb2a8956d0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.307606 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-nwc96"] Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.308023 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerName="init" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308040 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerName="init" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.308062 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308068 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.308078 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerName="dnsmasq-dns" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308085 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerName="dnsmasq-dns" Nov 11 13:58:46 crc kubenswrapper[4842]: E1111 13:58:46.308117 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308124 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308302 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308330 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" containerName="barbican-api-log" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308342 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" containerName="dnsmasq-dns" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.308988 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-nwc96" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.309754 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7v88\" (UniqueName: \"kubernetes.io/projected/fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365-kube-api-access-g7v88\") pod \"nova-api-db-create-dvshs\" (UID: \"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365\") " pod="openstack/nova-api-db-create-dvshs" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.340127 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-nwc96"] Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.391656 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-dvshs" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.391668 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzr9b\" (UniqueName: \"kubernetes.io/projected/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-kube-api-access-hzr9b\") pod \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392146 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-logs\") pod \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392199 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-config\") pod \"d47bc4a2-0636-4a23-b904-4ca118e84e05\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392323 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-svc\") pod \"d47bc4a2-0636-4a23-b904-4ca118e84e05\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392352 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-sb\") pod \"d47bc4a2-0636-4a23-b904-4ca118e84e05\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392397 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6v44\" (UniqueName: \"kubernetes.io/projected/d47bc4a2-0636-4a23-b904-4ca118e84e05-kube-api-access-k6v44\") pod \"d47bc4a2-0636-4a23-b904-4ca118e84e05\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392421 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-nb\") pod \"d47bc4a2-0636-4a23-b904-4ca118e84e05\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392440 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-combined-ca-bundle\") pod \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392461 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-swift-storage-0\") pod \"d47bc4a2-0636-4a23-b904-4ca118e84e05\" (UID: \"d47bc4a2-0636-4a23-b904-4ca118e84e05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392497 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data-custom\") pod \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\" 
(UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392529 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data\") pod \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\" (UID: \"425bf7e3-ef65-4d59-9e5c-ba44a8333b05\") " Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392819 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzmkr\" (UniqueName: \"kubernetes.io/projected/e35c9893-2858-44cb-a754-0aae7ca251ef-kube-api-access-xzmkr\") pod \"nova-cell1-db-create-nwc96\" (UID: \"e35c9893-2858-44cb-a754-0aae7ca251ef\") " pod="openstack/nova-cell1-db-create-nwc96" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.392871 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jdlq\" (UniqueName: \"kubernetes.io/projected/5c9219a3-683a-4aec-b1a5-017efe925201-kube-api-access-5jdlq\") pod \"nova-cell0-db-create-lqchx\" (UID: \"5c9219a3-683a-4aec-b1a5-017efe925201\") " pod="openstack/nova-cell0-db-create-lqchx" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.395174 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-logs" (OuterVolumeSpecName: "logs") pod "425bf7e3-ef65-4d59-9e5c-ba44a8333b05" (UID: "425bf7e3-ef65-4d59-9e5c-ba44a8333b05"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.434797 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jdlq\" (UniqueName: \"kubernetes.io/projected/5c9219a3-683a-4aec-b1a5-017efe925201-kube-api-access-5jdlq\") pod \"nova-cell0-db-create-lqchx\" (UID: \"5c9219a3-683a-4aec-b1a5-017efe925201\") " pod="openstack/nova-cell0-db-create-lqchx" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.447013 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-kube-api-access-hzr9b" (OuterVolumeSpecName: "kube-api-access-hzr9b") pod "425bf7e3-ef65-4d59-9e5c-ba44a8333b05" (UID: "425bf7e3-ef65-4d59-9e5c-ba44a8333b05"). InnerVolumeSpecName "kube-api-access-hzr9b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.499272 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzmkr\" (UniqueName: \"kubernetes.io/projected/e35c9893-2858-44cb-a754-0aae7ca251ef-kube-api-access-xzmkr\") pod \"nova-cell1-db-create-nwc96\" (UID: \"e35c9893-2858-44cb-a754-0aae7ca251ef\") " pod="openstack/nova-cell1-db-create-nwc96" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.499648 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzr9b\" (UniqueName: \"kubernetes.io/projected/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-kube-api-access-hzr9b\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.499662 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.527854 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-lqchx" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.548115 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "425bf7e3-ef65-4d59-9e5c-ba44a8333b05" (UID: "425bf7e3-ef65-4d59-9e5c-ba44a8333b05"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.556327 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d1692951-e3c3-4c77-b192-53bb2a8956d0" (UID: "d1692951-e3c3-4c77-b192-53bb2a8956d0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.558543 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d47bc4a2-0636-4a23-b904-4ca118e84e05-kube-api-access-k6v44" (OuterVolumeSpecName: "kube-api-access-k6v44") pod "d47bc4a2-0636-4a23-b904-4ca118e84e05" (UID: "d47bc4a2-0636-4a23-b904-4ca118e84e05"). InnerVolumeSpecName "kube-api-access-k6v44". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.558900 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzmkr\" (UniqueName: \"kubernetes.io/projected/e35c9893-2858-44cb-a754-0aae7ca251ef-kube-api-access-xzmkr\") pod \"nova-cell1-db-create-nwc96\" (UID: \"e35c9893-2858-44cb-a754-0aae7ca251ef\") " pod="openstack/nova-cell1-db-create-nwc96" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.650993 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6v44\" (UniqueName: \"kubernetes.io/projected/d47bc4a2-0636-4a23-b904-4ca118e84e05-kube-api-access-k6v44\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.651022 4842 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.651045 4842 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.665077 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-nwc96" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.689420 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69d5134b-7c5b-40d9-bcbd-a1bd368a358d" (UID: "69d5134b-7c5b-40d9-bcbd-a1bd368a358d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.755089 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.795393 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "425bf7e3-ef65-4d59-9e5c-ba44a8333b05" (UID: "425bf7e3-ef65-4d59-9e5c-ba44a8333b05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.825229 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c61f37c1-6c58-4ae1-a127-2238733058b4" (UID: "c61f37c1-6c58-4ae1-a127-2238733058b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.867059 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.867118 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.974466 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f586658c5-6fqht"] Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.985141 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:46 crc kubenswrapper[4842]: I1111 13:58:46.992719 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-config-data" (OuterVolumeSpecName: "config-data") pod "c61f37c1-6c58-4ae1-a127-2238733058b4" (UID: "c61f37c1-6c58-4ae1-a127-2238733058b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.007228 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f586658c5-6fqht"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.073582 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c61f37c1-6c58-4ae1-a127-2238733058b4-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.143299 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data" (OuterVolumeSpecName: "config-data") pod "425bf7e3-ef65-4d59-9e5c-ba44a8333b05" (UID: "425bf7e3-ef65-4d59-9e5c-ba44a8333b05"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.152340 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d47bc4a2-0636-4a23-b904-4ca118e84e05" (UID: "d47bc4a2-0636-4a23-b904-4ca118e84e05"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175538 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-nb\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175622 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-config\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175674 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnwnt\" (UniqueName: \"kubernetes.io/projected/6537267a-d2fe-47ed-b7f1-cb1829a84f03-kube-api-access-lnwnt\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175733 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-sb\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175758 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-svc\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175804 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-swift-storage-0\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175894 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.175905 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425bf7e3-ef65-4d59-9e5c-ba44a8333b05-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.228290 4842 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/openstackclient" event={"ID":"414a02e6-eebe-4988-99fd-1bf1651fa858","Type":"ContainerStarted","Data":"f0dbd43e30076187ff0635bff2e89c9b9f34eb26feacf575fefd8620bf90f0c3"} Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.232271 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" event={"ID":"d47bc4a2-0636-4a23-b904-4ca118e84e05","Type":"ContainerDied","Data":"85889397f38b61dc9a2eebf4ddd9c65a6a2769cb833ea0f63feb36087b12ee8c"} Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.232479 4842 scope.go:117] "RemoveContainer" containerID="486feb2a68cc8d9e4e864555f55fd3c9a846bb6700c5860e4f0c98309535e8e1" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.232673 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85d7795cdc-rprf5" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.237793 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-8445dd8b74-6n6wt" event={"ID":"425bf7e3-ef65-4d59-9e5c-ba44a8333b05","Type":"ContainerDied","Data":"8a5f848067eac6a8beb00eafe53fb4373812ed385b5f8ff9a4cda137b0dbcfec"} Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.237960 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-8445dd8b74-6n6wt" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.289496 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnwnt\" (UniqueName: \"kubernetes.io/projected/6537267a-d2fe-47ed-b7f1-cb1829a84f03-kube-api-access-lnwnt\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.289550 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-sb\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.289574 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-svc\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.289619 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-swift-storage-0\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.289682 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-nb\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.289717 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-config\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.290529 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-config\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.293143 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-svc\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.294356 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-swift-storage-0\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.294952 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-nb\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.296810 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-sb\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.307142 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-config" (OuterVolumeSpecName: "config") pod "d47bc4a2-0636-4a23-b904-4ca118e84e05" (UID: "d47bc4a2-0636-4a23-b904-4ca118e84e05"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.344707 4842 scope.go:117] "RemoveContainer" containerID="2137bbbb1af3fb8f914150712f4f01562635606d1a7cb5b7e1b72ba405a126b6" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.356605 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d47bc4a2-0636-4a23-b904-4ca118e84e05" (UID: "d47bc4a2-0636-4a23-b904-4ca118e84e05"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.356670 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f586658c5-6fqht"] Nov 11 13:58:47 crc kubenswrapper[4842]: E1111 13:58:47.357441 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-lnwnt], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-f586658c5-6fqht" podUID="6537267a-d2fe-47ed-b7f1-cb1829a84f03" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.363954 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d47bc4a2-0636-4a23-b904-4ca118e84e05" (UID: "d47bc4a2-0636-4a23-b904-4ca118e84e05"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.366087 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.710748176 podStartE2EDuration="24.366067836s" podCreationTimestamp="2025-11-11 13:58:23 +0000 UTC" firstStartedPulling="2025-11-11 13:58:25.03191445 +0000 UTC m=+1715.692204069" lastFinishedPulling="2025-11-11 13:58:45.68723411 +0000 UTC m=+1736.347523729" observedRunningTime="2025-11-11 13:58:47.347200908 +0000 UTC m=+1738.007490537" watchObservedRunningTime="2025-11-11 13:58:47.366067836 +0000 UTC m=+1738.026357455" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.386674 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-config-data" (OuterVolumeSpecName: "config-data") pod "69d5134b-7c5b-40d9-bcbd-a1bd368a358d" (UID: "69d5134b-7c5b-40d9-bcbd-a1bd368a358d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.386822 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnwnt\" (UniqueName: \"kubernetes.io/projected/6537267a-d2fe-47ed-b7f1-cb1829a84f03-kube-api-access-lnwnt\") pod \"dnsmasq-dns-f586658c5-6fqht\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.397294 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.397333 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.397346 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.397360 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d5134b-7c5b-40d9-bcbd-a1bd368a358d-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.417986 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1692951-e3c3-4c77-b192-53bb2a8956d0" (UID: "d1692951-e3c3-4c77-b192-53bb2a8956d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.433743 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d47bc4a2-0636-4a23-b904-4ca118e84e05" (UID: "d47bc4a2-0636-4a23-b904-4ca118e84e05"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.446848 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-config-data" (OuterVolumeSpecName: "config-data") pod "d1692951-e3c3-4c77-b192-53bb2a8956d0" (UID: "d1692951-e3c3-4c77-b192-53bb2a8956d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.462996 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.465528 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.470045 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.470223 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-slnb6" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.470331 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.470431 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.483197 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.498483 4842 scope.go:117] "RemoveContainer" containerID="1744fde37509b5c3a82b33ee05246a5dffe26de9f61efa360fad52731adb5354" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.499352 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.500148 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d47bc4a2-0636-4a23-b904-4ca118e84e05-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.500172 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1692951-e3c3-4c77-b192-53bb2a8956d0-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.513909 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586fb9b84f-djjqv"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.530725 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586fb9b84f-djjqv"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.531202 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.538012 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-8445dd8b74-6n6wt"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.566970 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-8445dd8b74-6n6wt"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.568952 4842 scope.go:117] "RemoveContainer" containerID="0a92764b51bcb59507221e90e4b63b42dc3f6e6141d523c937b99936f41ce903" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.594848 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-dvshs"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.604970 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/063b8da7-3e80-446d-980d-226001c00491-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.605303 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-scripts\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.605372 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.605448 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.605499 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njnq8\" (UniqueName: \"kubernetes.io/projected/063b8da7-3e80-446d-980d-226001c00491-kube-api-access-njnq8\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.605515 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: W1111 13:58:47.646809 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c9219a3_683a_4aec_b1a5_017efe925201.slice/crio-a402661b532e4639fb74c2df823c91bce0a02b962a376e51bede7daddc977938 WatchSource:0}: Error finding container a402661b532e4639fb74c2df823c91bce0a02b962a376e51bede7daddc977938: Status 404 returned error can't find the container with id 
a402661b532e4639fb74c2df823c91bce0a02b962a376e51bede7daddc977938 Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.662355 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.664392 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.666774 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.687392 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.711499 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-scripts\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.711581 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-svc\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.711627 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.711664 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb8l9\" (UniqueName: \"kubernetes.io/projected/85d010dc-9a24-4773-bfa5-07d453eb0ab4-kube-api-access-bb8l9\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712126 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-config\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712187 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-nb\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712285 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712340 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-njnq8\" (UniqueName: \"kubernetes.io/projected/063b8da7-3e80-446d-980d-226001c00491-kube-api-access-njnq8\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712358 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712407 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/063b8da7-3e80-446d-980d-226001c00491-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712432 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-swift-storage-0\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.712454 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-sb\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.724532 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/063b8da7-3e80-446d-980d-226001c00491-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.725535 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-scripts\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.733838 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.738842 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.755995 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njnq8\" (UniqueName: \"kubernetes.io/projected/063b8da7-3e80-446d-980d-226001c00491-kube-api-access-njnq8\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") 
" pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.773995 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-lqchx"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.774453 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.799263 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.801610 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.805947 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rzk8r" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.807333 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.808402 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.814807 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-logs\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.814861 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.814882 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.814919 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-swift-storage-0\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.814935 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-sb\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.814952 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.815009 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-scripts\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.815052 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-svc\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.815078 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data-custom\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.815110 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbclb\" (UniqueName: \"kubernetes.io/projected/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-kube-api-access-xbclb\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.815129 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb8l9\" (UniqueName: \"kubernetes.io/projected/85d010dc-9a24-4773-bfa5-07d453eb0ab4-kube-api-access-bb8l9\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.815144 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-config\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.815166 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-nb\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.816139 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-nb\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.816971 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-swift-storage-0\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: 
\"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.817026 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-svc\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.817628 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-sb\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.817921 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-config\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.830379 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.830510 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.851410 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bb8l9\" (UniqueName: \"kubernetes.io/projected/85d010dc-9a24-4773-bfa5-07d453eb0ab4-kube-api-access-bb8l9\") pod \"dnsmasq-dns-586fb9b84f-djjqv\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.858218 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85d7795cdc-rprf5"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.901444 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85d7795cdc-rprf5"] Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918236 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data-custom\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918291 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbclb\" (UniqueName: \"kubernetes.io/projected/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-kube-api-access-xbclb\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918327 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h86wc\" (UniqueName: \"kubernetes.io/projected/579b2e2f-e46e-467f-b723-5260c79afe21-kube-api-access-h86wc\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918358 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918405 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918432 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-logs\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918458 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918490 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918516 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-scripts\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918557 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918588 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-config-data\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918647 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918682 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-logs\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.918707 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-scripts\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.924821 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.931530 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-logs\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.939225 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.941945 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-scripts\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.945554 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.951805 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data-custom\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.955275 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbclb\" (UniqueName: \"kubernetes.io/projected/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-kube-api-access-xbclb\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.957032 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data\") pod \"cinder-api-0\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " pod="openstack/cinder-api-0" Nov 11 13:58:47 crc kubenswrapper[4842]: I1111 13:58:47.958336 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-nwc96"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023230 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h86wc\" (UniqueName: 
\"kubernetes.io/projected/579b2e2f-e46e-467f-b723-5260c79afe21-kube-api-access-h86wc\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023325 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023382 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023416 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023451 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023495 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-scripts\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023515 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-config-data\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.023626 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-logs\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.024065 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-logs\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.024834 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.026152 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for 
volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.028553 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.030977 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-config-data\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.034792 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.042486 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.055061 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h86wc\" (UniqueName: \"kubernetes.io/projected/579b2e2f-e46e-467f-b723-5260c79afe21-kube-api-access-h86wc\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.055273 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-scripts\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.089327 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="425bf7e3-ef65-4d59-9e5c-ba44a8333b05" path="/var/lib/kubelet/pods/425bf7e3-ef65-4d59-9e5c-ba44a8333b05/volumes" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.090394 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.091045 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1692951-e3c3-4c77-b192-53bb2a8956d0" path="/var/lib/kubelet/pods/d1692951-e3c3-4c77-b192-53bb2a8956d0/volumes" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.097718 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d47bc4a2-0636-4a23-b904-4ca118e84e05" path="/var/lib/kubelet/pods/d47bc4a2-0636-4a23-b904-4ca118e84e05/volumes" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.106323 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.121457 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 
13:58:48.121613 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.123646 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.123758 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.128806 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.128999 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.129234 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.129743 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.130509 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.132574 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:48 crc kubenswrapper[4842]: E1111 13:58:48.166550 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd47bc4a2_0636_4a23_b904_4ca118e84e05.slice/crio-85889397f38b61dc9a2eebf4ddd9c65a6a2769cb833ea0f63feb36087b12ee8c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd47bc4a2_0636_4a23_b904_4ca118e84e05.slice\": RecentStats: unable to find data in memory cache]" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.197906 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dcgm2"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.212030 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227393 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-log-httpd\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227445 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227467 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-run-httpd\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227496 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-config-data\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227514 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pkkj\" (UniqueName: \"kubernetes.io/projected/26b458d0-4c81-4730-b4af-fb5e0f26d830-kube-api-access-2pkkj\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227531 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227566 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-scripts\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227580 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227605 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-config-data\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 
13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227625 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rptmt\" (UniqueName: \"kubernetes.io/projected/0a28beec-6048-4e82-b36c-963af4b4c20d-kube-api-access-rptmt\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227641 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-logs\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227655 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227679 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.227709 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-scripts\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.329937 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-log-httpd\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330010 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330038 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-run-httpd\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330074 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-config-data\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330118 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-2pkkj\" (UniqueName: \"kubernetes.io/projected/26b458d0-4c81-4730-b4af-fb5e0f26d830-kube-api-access-2pkkj\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330145 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330194 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-scripts\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330214 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330250 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-config-data\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330280 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rptmt\" (UniqueName: \"kubernetes.io/projected/0a28beec-6048-4e82-b36c-963af4b4c20d-kube-api-access-rptmt\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330304 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-logs\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330323 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330360 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.330400 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-scripts\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc 
kubenswrapper[4842]: I1111 13:58:48.334806 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-log-httpd\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.337355 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-logs\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.338135 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.338172 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-run-httpd\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.338408 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.342743 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.345493 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-config-data\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.346016 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-scripts\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.347196 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lqchx" event={"ID":"5c9219a3-683a-4aec-b1a5-017efe925201","Type":"ContainerStarted","Data":"e6f8ae81dfc1be49ad74310f51ce8735a6cffc868610c81cdaf1dfe3180bd5a2"} Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.347258 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lqchx" event={"ID":"5c9219a3-683a-4aec-b1a5-017efe925201","Type":"ContainerStarted","Data":"a402661b532e4639fb74c2df823c91bce0a02b962a376e51bede7daddc977938"} Nov 11 13:58:48 crc 
kubenswrapper[4842]: I1111 13:58:48.353157 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-scripts\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.353591 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.356981 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.363971 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-nwc96" event={"ID":"e35c9893-2858-44cb-a754-0aae7ca251ef","Type":"ContainerStarted","Data":"4376582dfa2f53553f9241207f1ee46217fc930557bab685364e9f1aabc97086"} Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.366086 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-config-data\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.381175 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dvshs" event={"ID":"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365","Type":"ContainerStarted","Data":"d205021a04359c1f6f62dfb28b9d8bdb1acb380a341e2af7013d953af97ab3dc"} Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.381223 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dvshs" event={"ID":"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365","Type":"ContainerStarted","Data":"404717c884a35d7d49e212fc81708f3c46dacc874eeef9d4d49853426ac6097e"} Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.384037 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.402662 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rptmt\" (UniqueName: \"kubernetes.io/projected/0a28beec-6048-4e82-b36c-963af4b4c20d-kube-api-access-rptmt\") pod \"ceilometer-0\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.403664 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pkkj\" (UniqueName: \"kubernetes.io/projected/26b458d0-4c81-4730-b4af-fb5e0f26d830-kube-api-access-2pkkj\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.406871 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.413118 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-nwc96" podStartSLOduration=2.413087152 podStartE2EDuration="2.413087152s" podCreationTimestamp="2025-11-11 13:58:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:48.38499779 +0000 UTC m=+1739.045287429" watchObservedRunningTime="2025-11-11 13:58:48.413087152 +0000 UTC m=+1739.073376771" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.426474 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.459080 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.485894 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.538217 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-sb\") pod \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.538320 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-config\") pod \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.538350 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-svc\") pod \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.538382 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnwnt\" (UniqueName: \"kubernetes.io/projected/6537267a-d2fe-47ed-b7f1-cb1829a84f03-kube-api-access-lnwnt\") pod \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.538431 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-nb\") pod \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.538678 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-swift-storage-0\") pod \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\" (UID: \"6537267a-d2fe-47ed-b7f1-cb1829a84f03\") " Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.540168 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6537267a-d2fe-47ed-b7f1-cb1829a84f03" (UID: "6537267a-d2fe-47ed-b7f1-cb1829a84f03"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.540588 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6537267a-d2fe-47ed-b7f1-cb1829a84f03" (UID: "6537267a-d2fe-47ed-b7f1-cb1829a84f03"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.540927 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6537267a-d2fe-47ed-b7f1-cb1829a84f03" (UID: "6537267a-d2fe-47ed-b7f1-cb1829a84f03"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.542220 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-config" (OuterVolumeSpecName: "config") pod "6537267a-d2fe-47ed-b7f1-cb1829a84f03" (UID: "6537267a-d2fe-47ed-b7f1-cb1829a84f03"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.547176 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6537267a-d2fe-47ed-b7f1-cb1829a84f03" (UID: "6537267a-d2fe-47ed-b7f1-cb1829a84f03"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.558462 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6537267a-d2fe-47ed-b7f1-cb1829a84f03-kube-api-access-lnwnt" (OuterVolumeSpecName: "kube-api-access-lnwnt") pod "6537267a-d2fe-47ed-b7f1-cb1829a84f03" (UID: "6537267a-d2fe-47ed-b7f1-cb1829a84f03"). InnerVolumeSpecName "kube-api-access-lnwnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.601080 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.642694 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.642735 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.642747 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.642760 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnwnt\" (UniqueName: \"kubernetes.io/projected/6537267a-d2fe-47ed-b7f1-cb1829a84f03-kube-api-access-lnwnt\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.642770 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.642781 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6537267a-d2fe-47ed-b7f1-cb1829a84f03-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.856814 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586fb9b84f-djjqv"] Nov 11 13:58:48 crc kubenswrapper[4842]: I1111 13:58:48.871242 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.254194 4842 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.258559 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:49 crc kubenswrapper[4842]: W1111 13:58:49.269716 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a28beec_6048_4e82_b36c_963af4b4c20d.slice/crio-8979af75d9a49d223baf79a5c58bbb76254596a6a7c078c3dcbfdf65f4a8f3da WatchSource:0}: Error finding container 8979af75d9a49d223baf79a5c58bbb76254596a6a7c078c3dcbfdf65f4a8f3da: Status 404 returned error can't find the container with id 8979af75d9a49d223baf79a5c58bbb76254596a6a7c078c3dcbfdf65f4a8f3da Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.328576 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.373174 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.414983 4842 generic.go:334] "Generic (PLEG): container finished" podID="fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365" containerID="d205021a04359c1f6f62dfb28b9d8bdb1acb380a341e2af7013d953af97ab3dc" exitCode=0 Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.415072 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dvshs" event={"ID":"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365","Type":"ContainerDied","Data":"d205021a04359c1f6f62dfb28b9d8bdb1acb380a341e2af7013d953af97ab3dc"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.417162 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"579b2e2f-e46e-467f-b723-5260c79afe21","Type":"ContainerStarted","Data":"80a887c8a770c1e66cd5e2cf1f5f72fa5c2f3f0bdb761cfaec5951a3d960ba0e"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.421197 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"26b458d0-4c81-4730-b4af-fb5e0f26d830","Type":"ContainerStarted","Data":"2099521672ca3f224680d9c2337777125b159c910bc52ffa1da778f2c11c9a41"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.432418 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerStarted","Data":"8979af75d9a49d223baf79a5c58bbb76254596a6a7c078c3dcbfdf65f4a8f3da"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.435174 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"063b8da7-3e80-446d-980d-226001c00491","Type":"ContainerStarted","Data":"228800069bc35bcee224577f2aa2cbc16aeacd40e9f64bd7159a7108f377481e"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.436583 4842 generic.go:334] "Generic (PLEG): container finished" podID="e35c9893-2858-44cb-a754-0aae7ca251ef" containerID="3adcedbcd834e3507ffafa2601772236fde16cc6021a523303c88ac00e5e9f5f" exitCode=0 Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.436640 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-nwc96" event={"ID":"e35c9893-2858-44cb-a754-0aae7ca251ef","Type":"ContainerDied","Data":"3adcedbcd834e3507ffafa2601772236fde16cc6021a523303c88ac00e5e9f5f"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.441889 4842 generic.go:334] "Generic (PLEG): 
container finished" podID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerID="fbbbac5ff73c19ce3ebd43bbf71619895fa03a11c55bce2d04e7759dab3ac95e" exitCode=0 Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.441975 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" event={"ID":"85d010dc-9a24-4773-bfa5-07d453eb0ab4","Type":"ContainerDied","Data":"fbbbac5ff73c19ce3ebd43bbf71619895fa03a11c55bce2d04e7759dab3ac95e"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.442007 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" event={"ID":"85d010dc-9a24-4773-bfa5-07d453eb0ab4","Type":"ContainerStarted","Data":"e51d4143494f348e7f9cb9d2a674e092e9c5a6a5a01e690a90da3cb2c1ecf6f8"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.458508 4842 generic.go:334] "Generic (PLEG): container finished" podID="5c9219a3-683a-4aec-b1a5-017efe925201" containerID="e6f8ae81dfc1be49ad74310f51ce8735a6cffc868610c81cdaf1dfe3180bd5a2" exitCode=0 Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.458591 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lqchx" event={"ID":"5c9219a3-683a-4aec-b1a5-017efe925201","Type":"ContainerDied","Data":"e6f8ae81dfc1be49ad74310f51ce8735a6cffc868610c81cdaf1dfe3180bd5a2"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.464708 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f586658c5-6fqht" Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.465074 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"34c091e4-e2ab-4abe-95f6-7a8621a75f3f","Type":"ContainerStarted","Data":"4193b8d3bbb3c6230f2989fb3e6ff84b7720f3a2b4336321941db81e70bbfc11"} Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.465362 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dcgm2" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="registry-server" containerID="cri-o://9fc9de4d1cfd1f4a5f15b40dace6ae6609c1826d1096a6006fd1fe679076a959" gracePeriod=2 Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.570378 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f586658c5-6fqht"] Nov 11 13:58:49 crc kubenswrapper[4842]: I1111 13:58:49.587534 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f586658c5-6fqht"] Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.130400 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6537267a-d2fe-47ed-b7f1-cb1829a84f03" path="/var/lib/kubelet/pods/6537267a-d2fe-47ed-b7f1-cb1829a84f03/volumes" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.204651 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-dvshs" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.216457 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7v88\" (UniqueName: \"kubernetes.io/projected/fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365-kube-api-access-g7v88\") pod \"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365\" (UID: \"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365\") " Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.231436 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365-kube-api-access-g7v88" (OuterVolumeSpecName: "kube-api-access-g7v88") pod "fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365" (UID: "fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365"). InnerVolumeSpecName "kube-api-access-g7v88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.321487 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7v88\" (UniqueName: \"kubernetes.io/projected/fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365-kube-api-access-g7v88\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.574112 4842 generic.go:334] "Generic (PLEG): container finished" podID="7b55e038-2882-49b4-911c-7356d64c6352" containerID="9fc9de4d1cfd1f4a5f15b40dace6ae6609c1826d1096a6006fd1fe679076a959" exitCode=0 Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.574214 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dcgm2" event={"ID":"7b55e038-2882-49b4-911c-7356d64c6352","Type":"ContainerDied","Data":"9fc9de4d1cfd1f4a5f15b40dace6ae6609c1826d1096a6006fd1fe679076a959"} Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.614522 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" event={"ID":"85d010dc-9a24-4773-bfa5-07d453eb0ab4","Type":"ContainerStarted","Data":"ae523ba92374e98b41f2d246d915e2b3b7b6393469dd3fe27205f33a0543611a"} Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.614812 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.627077 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerStarted","Data":"22894b63708b39a784ffb4ed886e2a358dcf42c63efd7174e0b7cddd4ff50a37"} Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.634232 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"063b8da7-3e80-446d-980d-226001c00491","Type":"ContainerStarted","Data":"60ae14b45d1eb4f9fdb276f0faa4c83ce5df64748ad12fb3bd4ffa204e39059b"} Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.643538 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.656633 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" podStartSLOduration=3.6566128989999998 podStartE2EDuration="3.656612899s" podCreationTimestamp="2025-11-11 13:58:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:50.651506707 +0000 UTC m=+1741.311796326" watchObservedRunningTime="2025-11-11 13:58:50.656612899 +0000 UTC m=+1741.316902528" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.688274 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-dvshs" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.695364 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dvshs" event={"ID":"fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365","Type":"ContainerDied","Data":"404717c884a35d7d49e212fc81708f3c46dacc874eeef9d4d49853426ac6097e"} Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.695449 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="404717c884a35d7d49e212fc81708f3c46dacc874eeef9d4d49853426ac6097e" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.765571 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-catalog-content\") pod \"7b55e038-2882-49b4-911c-7356d64c6352\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.765613 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-utilities\") pod \"7b55e038-2882-49b4-911c-7356d64c6352\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.765671 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4gh8\" (UniqueName: \"kubernetes.io/projected/7b55e038-2882-49b4-911c-7356d64c6352-kube-api-access-b4gh8\") pod \"7b55e038-2882-49b4-911c-7356d64c6352\" (UID: \"7b55e038-2882-49b4-911c-7356d64c6352\") " Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.770569 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-utilities" (OuterVolumeSpecName: "utilities") pod "7b55e038-2882-49b4-911c-7356d64c6352" (UID: "7b55e038-2882-49b4-911c-7356d64c6352"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.772528 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b55e038-2882-49b4-911c-7356d64c6352-kube-api-access-b4gh8" (OuterVolumeSpecName: "kube-api-access-b4gh8") pod "7b55e038-2882-49b4-911c-7356d64c6352" (UID: "7b55e038-2882-49b4-911c-7356d64c6352"). InnerVolumeSpecName "kube-api-access-b4gh8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.795659 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b55e038-2882-49b4-911c-7356d64c6352" (UID: "7b55e038-2882-49b4-911c-7356d64c6352"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.868220 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4gh8\" (UniqueName: \"kubernetes.io/projected/7b55e038-2882-49b4-911c-7356d64c6352-kube-api-access-b4gh8\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.868250 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.868262 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b55e038-2882-49b4-911c-7356d64c6352-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.874139 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-lqchx" Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.973345 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jdlq\" (UniqueName: \"kubernetes.io/projected/5c9219a3-683a-4aec-b1a5-017efe925201-kube-api-access-5jdlq\") pod \"5c9219a3-683a-4aec-b1a5-017efe925201\" (UID: \"5c9219a3-683a-4aec-b1a5-017efe925201\") " Nov 11 13:58:50 crc kubenswrapper[4842]: I1111 13:58:50.980760 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c9219a3-683a-4aec-b1a5-017efe925201-kube-api-access-5jdlq" (OuterVolumeSpecName: "kube-api-access-5jdlq") pod "5c9219a3-683a-4aec-b1a5-017efe925201" (UID: "5c9219a3-683a-4aec-b1a5-017efe925201"). InnerVolumeSpecName "kube-api-access-5jdlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.056617 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.076518 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jdlq\" (UniqueName: \"kubernetes.io/projected/5c9219a3-683a-4aec-b1a5-017efe925201-kube-api-access-5jdlq\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.280325 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.299886 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-nwc96" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.383608 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzmkr\" (UniqueName: \"kubernetes.io/projected/e35c9893-2858-44cb-a754-0aae7ca251ef-kube-api-access-xzmkr\") pod \"e35c9893-2858-44cb-a754-0aae7ca251ef\" (UID: \"e35c9893-2858-44cb-a754-0aae7ca251ef\") " Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.388671 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e35c9893-2858-44cb-a754-0aae7ca251ef-kube-api-access-xzmkr" (OuterVolumeSpecName: "kube-api-access-xzmkr") pod "e35c9893-2858-44cb-a754-0aae7ca251ef" (UID: "e35c9893-2858-44cb-a754-0aae7ca251ef"). InnerVolumeSpecName "kube-api-access-xzmkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.488902 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzmkr\" (UniqueName: \"kubernetes.io/projected/e35c9893-2858-44cb-a754-0aae7ca251ef-kube-api-access-xzmkr\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.718368 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-nwc96" event={"ID":"e35c9893-2858-44cb-a754-0aae7ca251ef","Type":"ContainerDied","Data":"4376582dfa2f53553f9241207f1ee46217fc930557bab685364e9f1aabc97086"} Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.718414 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4376582dfa2f53553f9241207f1ee46217fc930557bab685364e9f1aabc97086" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.718482 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-nwc96" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.756153 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"34c091e4-e2ab-4abe-95f6-7a8621a75f3f","Type":"ContainerStarted","Data":"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8"} Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.759522 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"579b2e2f-e46e-467f-b723-5260c79afe21","Type":"ContainerStarted","Data":"e76fb925fb81fae6639f6c4939551c32430c53e992afd92dd1901005f9046ed2"} Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.763194 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dcgm2" event={"ID":"7b55e038-2882-49b4-911c-7356d64c6352","Type":"ContainerDied","Data":"42ab64bfd42000be76c91108672213b57af0c7b568ec3f6264fc05c5d3ec146a"} Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.763227 4842 scope.go:117] "RemoveContainer" containerID="9fc9de4d1cfd1f4a5f15b40dace6ae6609c1826d1096a6006fd1fe679076a959" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.763328 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dcgm2" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.768224 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"26b458d0-4c81-4730-b4af-fb5e0f26d830","Type":"ContainerStarted","Data":"0791a9873bb23940d7de46dfd60791c6111bea6e8d77c9b54374bd1415986605"} Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.792428 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerStarted","Data":"f418427b529d14969dd1fc345becc87db987ca455586276c9a57dabebd5aa9df"} Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.816473 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-lqchx" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.817065 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lqchx" event={"ID":"5c9219a3-683a-4aec-b1a5-017efe925201","Type":"ContainerDied","Data":"a402661b532e4639fb74c2df823c91bce0a02b962a376e51bede7daddc977938"} Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.817120 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a402661b532e4639fb74c2df823c91bce0a02b962a376e51bede7daddc977938" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.917877 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dcgm2"] Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.922156 4842 scope.go:117] "RemoveContainer" containerID="2695e87204886cbd497f927abb9cefaed2a3cd267891a441fa7f6c88d61fa9af" Nov 11 13:58:51 crc kubenswrapper[4842]: I1111 13:58:51.932330 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dcgm2"] Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.104503 4842 scope.go:117] "RemoveContainer" containerID="010fbc55d5433836d4c097969e7a97de56a2ab2d33ccad0a6224ba2716899a7d" Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.108852 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b55e038-2882-49b4-911c-7356d64c6352" path="/var/lib/kubelet/pods/7b55e038-2882-49b4-911c-7356d64c6352/volumes" Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.871008 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerStarted","Data":"4c832a37ec3d0fef17d0c74a99ff745ff7816c6eea0495c34f12dc763626b8d1"} Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.874557 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"063b8da7-3e80-446d-980d-226001c00491","Type":"ContainerStarted","Data":"d23a7be85ef816ba8821ef5a2d7793259e369b801a8186e34538a77fd105803e"} Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.886225 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"34c091e4-e2ab-4abe-95f6-7a8621a75f3f","Type":"ContainerStarted","Data":"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db"} Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.887217 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.894088 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"579b2e2f-e46e-467f-b723-5260c79afe21","Type":"ContainerStarted","Data":"5414fda71abcc24268dcd530c3ed3b502625e9daad3dea13fd817c76de9a1eff"} Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.894267 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-log" containerID="cri-o://e76fb925fb81fae6639f6c4939551c32430c53e992afd92dd1901005f9046ed2" gracePeriod=30 Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.894385 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-httpd" containerID="cri-o://5414fda71abcc24268dcd530c3ed3b502625e9daad3dea13fd817c76de9a1eff" gracePeriod=30 Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.912876 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.625721653 podStartE2EDuration="5.912855209s" podCreationTimestamp="2025-11-11 13:58:47 +0000 UTC" firstStartedPulling="2025-11-11 13:58:48.669244566 +0000 UTC m=+1739.329534185" lastFinishedPulling="2025-11-11 13:58:48.956378132 +0000 UTC m=+1739.616667741" observedRunningTime="2025-11-11 13:58:52.894944061 +0000 UTC m=+1743.555233680" watchObservedRunningTime="2025-11-11 13:58:52.912855209 +0000 UTC m=+1743.573144828" Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.928754 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"26b458d0-4c81-4730-b4af-fb5e0f26d830","Type":"ContainerStarted","Data":"09daa28ebfcb544bf56310fe6618559cc2a117ef31ae97db9f16bbf3ffab473f"} Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.929266 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-httpd" containerID="cri-o://09daa28ebfcb544bf56310fe6618559cc2a117ef31ae97db9f16bbf3ffab473f" gracePeriod=30 Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.929610 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-log" containerID="cri-o://0791a9873bb23940d7de46dfd60791c6111bea6e8d77c9b54374bd1415986605" gracePeriod=30 Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.958176 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.958151367 podStartE2EDuration="5.958151367s" podCreationTimestamp="2025-11-11 13:58:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:52.929061853 +0000 UTC m=+1743.589351492" watchObservedRunningTime="2025-11-11 13:58:52.958151367 +0000 UTC m=+1743.618440996" Nov 11 13:58:52 crc kubenswrapper[4842]: I1111 13:58:52.977633 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.977610485 podStartE2EDuration="6.977610485s" podCreationTimestamp="2025-11-11 13:58:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-11 13:58:52.949059028 +0000 UTC m=+1743.609348647" watchObservedRunningTime="2025-11-11 13:58:52.977610485 +0000 UTC m=+1743.637900104" Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.035534 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.035515183 podStartE2EDuration="7.035515183s" podCreationTimestamp="2025-11-11 13:58:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:52.969443606 +0000 UTC m=+1743.629733225" watchObservedRunningTime="2025-11-11 13:58:53.035515183 +0000 UTC m=+1743.695804802" Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.328000 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.952718 4842 generic.go:334] "Generic (PLEG): container finished" podID="579b2e2f-e46e-467f-b723-5260c79afe21" containerID="5414fda71abcc24268dcd530c3ed3b502625e9daad3dea13fd817c76de9a1eff" exitCode=0 Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.953342 4842 generic.go:334] "Generic (PLEG): container finished" podID="579b2e2f-e46e-467f-b723-5260c79afe21" containerID="e76fb925fb81fae6639f6c4939551c32430c53e992afd92dd1901005f9046ed2" exitCode=143 Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.952767 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"579b2e2f-e46e-467f-b723-5260c79afe21","Type":"ContainerDied","Data":"5414fda71abcc24268dcd530c3ed3b502625e9daad3dea13fd817c76de9a1eff"} Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.953441 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"579b2e2f-e46e-467f-b723-5260c79afe21","Type":"ContainerDied","Data":"e76fb925fb81fae6639f6c4939551c32430c53e992afd92dd1901005f9046ed2"} Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.960081 4842 generic.go:334] "Generic (PLEG): container finished" podID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerID="09daa28ebfcb544bf56310fe6618559cc2a117ef31ae97db9f16bbf3ffab473f" exitCode=0 Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.960113 4842 generic.go:334] "Generic (PLEG): container finished" podID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerID="0791a9873bb23940d7de46dfd60791c6111bea6e8d77c9b54374bd1415986605" exitCode=143 Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.960631 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"26b458d0-4c81-4730-b4af-fb5e0f26d830","Type":"ContainerDied","Data":"09daa28ebfcb544bf56310fe6618559cc2a117ef31ae97db9f16bbf3ffab473f"} Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.962008 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"26b458d0-4c81-4730-b4af-fb5e0f26d830","Type":"ContainerDied","Data":"0791a9873bb23940d7de46dfd60791c6111bea6e8d77c9b54374bd1415986605"} Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.962022 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"26b458d0-4c81-4730-b4af-fb5e0f26d830","Type":"ContainerDied","Data":"2099521672ca3f224680d9c2337777125b159c910bc52ffa1da778f2c11c9a41"} Nov 11 13:58:53 crc kubenswrapper[4842]: I1111 13:58:53.962033 4842 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2099521672ca3f224680d9c2337777125b159c910bc52ffa1da778f2c11c9a41" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.017866 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.024222 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.059050 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:58:54 crc kubenswrapper[4842]: E1111 13:58:54.059381 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.087655 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-scripts\") pod \"26b458d0-4c81-4730-b4af-fb5e0f26d830\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.087744 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-combined-ca-bundle\") pod \"26b458d0-4c81-4730-b4af-fb5e0f26d830\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.087770 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-config-data\") pod \"26b458d0-4c81-4730-b4af-fb5e0f26d830\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.087797 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-config-data\") pod \"579b2e2f-e46e-467f-b723-5260c79afe21\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.087828 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-scripts\") pod \"579b2e2f-e46e-467f-b723-5260c79afe21\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.087918 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h86wc\" (UniqueName: \"kubernetes.io/projected/579b2e2f-e46e-467f-b723-5260c79afe21-kube-api-access-h86wc\") pod \"579b2e2f-e46e-467f-b723-5260c79afe21\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.087972 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-httpd-run\") pod 
\"26b458d0-4c81-4730-b4af-fb5e0f26d830\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.088008 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-logs\") pod \"26b458d0-4c81-4730-b4af-fb5e0f26d830\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.088047 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pkkj\" (UniqueName: \"kubernetes.io/projected/26b458d0-4c81-4730-b4af-fb5e0f26d830-kube-api-access-2pkkj\") pod \"26b458d0-4c81-4730-b4af-fb5e0f26d830\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.088103 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-httpd-run\") pod \"579b2e2f-e46e-467f-b723-5260c79afe21\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.088159 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"26b458d0-4c81-4730-b4af-fb5e0f26d830\" (UID: \"26b458d0-4c81-4730-b4af-fb5e0f26d830\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.088191 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-logs\") pod \"579b2e2f-e46e-467f-b723-5260c79afe21\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.088275 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"579b2e2f-e46e-467f-b723-5260c79afe21\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.088318 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-combined-ca-bundle\") pod \"579b2e2f-e46e-467f-b723-5260c79afe21\" (UID: \"579b2e2f-e46e-467f-b723-5260c79afe21\") " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.094501 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-logs" (OuterVolumeSpecName: "logs") pod "579b2e2f-e46e-467f-b723-5260c79afe21" (UID: "579b2e2f-e46e-467f-b723-5260c79afe21"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.097148 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "26b458d0-4c81-4730-b4af-fb5e0f26d830" (UID: "26b458d0-4c81-4730-b4af-fb5e0f26d830"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.097167 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-scripts" (OuterVolumeSpecName: "scripts") pod "26b458d0-4c81-4730-b4af-fb5e0f26d830" (UID: "26b458d0-4c81-4730-b4af-fb5e0f26d830"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.099514 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/579b2e2f-e46e-467f-b723-5260c79afe21-kube-api-access-h86wc" (OuterVolumeSpecName: "kube-api-access-h86wc") pod "579b2e2f-e46e-467f-b723-5260c79afe21" (UID: "579b2e2f-e46e-467f-b723-5260c79afe21"). InnerVolumeSpecName "kube-api-access-h86wc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.100142 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-logs" (OuterVolumeSpecName: "logs") pod "26b458d0-4c81-4730-b4af-fb5e0f26d830" (UID: "26b458d0-4c81-4730-b4af-fb5e0f26d830"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.098837 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "579b2e2f-e46e-467f-b723-5260c79afe21" (UID: "579b2e2f-e46e-467f-b723-5260c79afe21"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.116916 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "579b2e2f-e46e-467f-b723-5260c79afe21" (UID: "579b2e2f-e46e-467f-b723-5260c79afe21"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.121272 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "26b458d0-4c81-4730-b4af-fb5e0f26d830" (UID: "26b458d0-4c81-4730-b4af-fb5e0f26d830"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.128378 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26b458d0-4c81-4730-b4af-fb5e0f26d830-kube-api-access-2pkkj" (OuterVolumeSpecName: "kube-api-access-2pkkj") pod "26b458d0-4c81-4730-b4af-fb5e0f26d830" (UID: "26b458d0-4c81-4730-b4af-fb5e0f26d830"). InnerVolumeSpecName "kube-api-access-2pkkj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.132292 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-scripts" (OuterVolumeSpecName: "scripts") pod "579b2e2f-e46e-467f-b723-5260c79afe21" (UID: "579b2e2f-e46e-467f-b723-5260c79afe21"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.163242 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-config-data" (OuterVolumeSpecName: "config-data") pod "26b458d0-4c81-4730-b4af-fb5e0f26d830" (UID: "26b458d0-4c81-4730-b4af-fb5e0f26d830"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.184070 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "579b2e2f-e46e-467f-b723-5260c79afe21" (UID: "579b2e2f-e46e-467f-b723-5260c79afe21"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.189542 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "26b458d0-4c81-4730-b4af-fb5e0f26d830" (UID: "26b458d0-4c81-4730-b4af-fb5e0f26d830"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190867 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pkkj\" (UniqueName: \"kubernetes.io/projected/26b458d0-4c81-4730-b4af-fb5e0f26d830-kube-api-access-2pkkj\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190890 4842 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190909 4842 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190918 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/579b2e2f-e46e-467f-b723-5260c79afe21-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190930 4842 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190940 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190948 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190956 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190964 4842 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26b458d0-4c81-4730-b4af-fb5e0f26d830-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190973 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190981 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h86wc\" (UniqueName: \"kubernetes.io/projected/579b2e2f-e46e-467f-b723-5260c79afe21-kube-api-access-h86wc\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190990 4842 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.190997 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26b458d0-4c81-4730-b4af-fb5e0f26d830-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.222437 4842 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.235619 4842 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.236405 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-config-data" (OuterVolumeSpecName: "config-data") pod "579b2e2f-e46e-467f-b723-5260c79afe21" (UID: "579b2e2f-e46e-467f-b723-5260c79afe21"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.293832 4842 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.293877 4842 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.293890 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/579b2e2f-e46e-467f-b723-5260c79afe21-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.973311 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerStarted","Data":"a946e415b396f0b1e1cda81b503203022611f0dba648e9a52a82832afcc1cef4"} Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.973656 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.973437 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-notification-agent" containerID="cri-o://f418427b529d14969dd1fc345becc87db987ca455586276c9a57dabebd5aa9df" gracePeriod=30 Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.973406 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-central-agent" containerID="cri-o://22894b63708b39a784ffb4ed886e2a358dcf42c63efd7174e0b7cddd4ff50a37" gracePeriod=30 Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.973436 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="proxy-httpd" containerID="cri-o://a946e415b396f0b1e1cda81b503203022611f0dba648e9a52a82832afcc1cef4" gracePeriod=30 Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.973479 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="sg-core" containerID="cri-o://4c832a37ec3d0fef17d0c74a99ff745ff7816c6eea0495c34f12dc763626b8d1" gracePeriod=30 Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.976956 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api-log" containerID="cri-o://a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8" gracePeriod=30 Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.976984 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.977003 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.977051 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"579b2e2f-e46e-467f-b723-5260c79afe21","Type":"ContainerDied","Data":"80a887c8a770c1e66cd5e2cf1f5f72fa5c2f3f0bdb761cfaec5951a3d960ba0e"} Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.977078 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api" containerID="cri-o://d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db" gracePeriod=30 Nov 11 13:58:54 crc kubenswrapper[4842]: I1111 13:58:54.977100 4842 scope.go:117] "RemoveContainer" containerID="5414fda71abcc24268dcd530c3ed3b502625e9daad3dea13fd817c76de9a1eff" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.014630 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.675511179 podStartE2EDuration="8.014608664s" podCreationTimestamp="2025-11-11 13:58:47 +0000 UTC" firstStartedPulling="2025-11-11 13:58:49.297484774 +0000 UTC m=+1739.957774393" lastFinishedPulling="2025-11-11 13:58:53.636582259 +0000 UTC m=+1744.296871878" observedRunningTime="2025-11-11 13:58:54.995008941 +0000 UTC m=+1745.655298580" watchObservedRunningTime="2025-11-11 13:58:55.014608664 +0000 UTC m=+1745.674898283" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.020543 4842 scope.go:117] "RemoveContainer" containerID="e76fb925fb81fae6639f6c4939551c32430c53e992afd92dd1901005f9046ed2" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.038979 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.054547 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.076557 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.097592 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113164 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113643 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-log" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113664 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-log" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113681 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-log" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113691 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-log" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113729 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c9219a3-683a-4aec-b1a5-017efe925201" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 
13:58:55.113737 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c9219a3-683a-4aec-b1a5-017efe925201" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113752 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="registry-server" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113759 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="registry-server" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113780 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-httpd" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113788 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-httpd" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113798 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="extract-content" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113806 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="extract-content" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113817 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e35c9893-2858-44cb-a754-0aae7ca251ef" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113824 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e35c9893-2858-44cb-a754-0aae7ca251ef" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113833 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113840 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113856 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-httpd" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113865 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-httpd" Nov 11 13:58:55 crc kubenswrapper[4842]: E1111 13:58:55.113881 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="extract-utilities" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.113890 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b55e038-2882-49b4-911c-7356d64c6352" containerName="extract-utilities" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114071 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114083 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-httpd" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114101 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b55e038-2882-49b4-911c-7356d64c6352" 
containerName="registry-server" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114128 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="e35c9893-2858-44cb-a754-0aae7ca251ef" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114141 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c9219a3-683a-4aec-b1a5-017efe925201" containerName="mariadb-database-create" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114148 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-log" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114156 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" containerName="glance-log" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.114169 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" containerName="glance-httpd" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.115206 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.117936 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.120176 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.123568 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.123754 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.127093 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-rzk8r" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.132169 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.133743 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.136258 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.136556 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.161858 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.211343 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.211576 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-config-data\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.211629 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-config-data\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.211666 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.211694 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmhtp\" (UniqueName: \"kubernetes.io/projected/762a07fe-46d0-4852-bfef-6ed8007dcd63-kube-api-access-nmhtp\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.211717 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.211741 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-scripts\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212168 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212359 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212468 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-scripts\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212702 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/069ae2bc-eee5-4b02-9dd7-602303027ee4-logs\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212761 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212806 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/069ae2bc-eee5-4b02-9dd7-602303027ee4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212882 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-488xl\" (UniqueName: \"kubernetes.io/projected/069ae2bc-eee5-4b02-9dd7-602303027ee4-kube-api-access-488xl\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212914 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/762a07fe-46d0-4852-bfef-6ed8007dcd63-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.212970 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/762a07fe-46d0-4852-bfef-6ed8007dcd63-logs\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc 
kubenswrapper[4842]: I1111 13:58:55.315188 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-config-data\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315264 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315301 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmhtp\" (UniqueName: \"kubernetes.io/projected/762a07fe-46d0-4852-bfef-6ed8007dcd63-kube-api-access-nmhtp\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315333 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315364 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-scripts\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315398 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315441 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315468 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-scripts\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315502 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/069ae2bc-eee5-4b02-9dd7-602303027ee4-logs\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315528 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315550 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/069ae2bc-eee5-4b02-9dd7-602303027ee4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315585 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-488xl\" (UniqueName: \"kubernetes.io/projected/069ae2bc-eee5-4b02-9dd7-602303027ee4-kube-api-access-488xl\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315608 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/762a07fe-46d0-4852-bfef-6ed8007dcd63-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315643 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/762a07fe-46d0-4852-bfef-6ed8007dcd63-logs\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315703 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.315740 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-config-data\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.316004 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.317291 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/762a07fe-46d0-4852-bfef-6ed8007dcd63-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.318371 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/069ae2bc-eee5-4b02-9dd7-602303027ee4-logs\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.321941 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/762a07fe-46d0-4852-bfef-6ed8007dcd63-logs\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.317623 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/069ae2bc-eee5-4b02-9dd7-602303027ee4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.330326 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.343172 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.346129 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-scripts\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.350155 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.351109 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-config-data\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.352397 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmhtp\" (UniqueName: \"kubernetes.io/projected/762a07fe-46d0-4852-bfef-6ed8007dcd63-kube-api-access-nmhtp\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.353957 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: 
\"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.379833 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.379869 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/069ae2bc-eee5-4b02-9dd7-602303027ee4-scripts\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.385307 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/762a07fe-46d0-4852-bfef-6ed8007dcd63-config-data\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.386917 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-488xl\" (UniqueName: \"kubernetes.io/projected/069ae2bc-eee5-4b02-9dd7-602303027ee4-kube-api-access-488xl\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.461637 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"069ae2bc-eee5-4b02-9dd7-602303027ee4\") " pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.479876 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"762a07fe-46d0-4852-bfef-6ed8007dcd63\") " pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.604457 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.614769 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.983808 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.994345 4842 generic.go:334] "Generic (PLEG): container finished" podID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerID="a946e415b396f0b1e1cda81b503203022611f0dba648e9a52a82832afcc1cef4" exitCode=0 Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.994383 4842 generic.go:334] "Generic (PLEG): container finished" podID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerID="4c832a37ec3d0fef17d0c74a99ff745ff7816c6eea0495c34f12dc763626b8d1" exitCode=2 Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.994392 4842 generic.go:334] "Generic (PLEG): container finished" podID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerID="f418427b529d14969dd1fc345becc87db987ca455586276c9a57dabebd5aa9df" exitCode=0 Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.994434 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerDied","Data":"a946e415b396f0b1e1cda81b503203022611f0dba648e9a52a82832afcc1cef4"} Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.994460 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerDied","Data":"4c832a37ec3d0fef17d0c74a99ff745ff7816c6eea0495c34f12dc763626b8d1"} Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.994473 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerDied","Data":"f418427b529d14969dd1fc345becc87db987ca455586276c9a57dabebd5aa9df"} Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.996400 4842 generic.go:334] "Generic (PLEG): container finished" podID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerID="d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db" exitCode=0 Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.996424 4842 generic.go:334] "Generic (PLEG): container finished" podID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerID="a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8" exitCode=143 Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.996442 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"34c091e4-e2ab-4abe-95f6-7a8621a75f3f","Type":"ContainerDied","Data":"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db"} Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.996463 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"34c091e4-e2ab-4abe-95f6-7a8621a75f3f","Type":"ContainerDied","Data":"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8"} Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.996478 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"34c091e4-e2ab-4abe-95f6-7a8621a75f3f","Type":"ContainerDied","Data":"4193b8d3bbb3c6230f2989fb3e6ff84b7720f3a2b4336321941db81e70bbfc11"} Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.996508 4842 scope.go:117] "RemoveContainer" containerID="d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db" Nov 11 13:58:55 crc kubenswrapper[4842]: I1111 13:58:55.996659 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.043775 4842 scope.go:117] "RemoveContainer" containerID="a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.077091 4842 scope.go:117] "RemoveContainer" containerID="d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db" Nov 11 13:58:56 crc kubenswrapper[4842]: E1111 13:58:56.077602 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db\": container with ID starting with d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db not found: ID does not exist" containerID="d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.077633 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db"} err="failed to get container status \"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db\": rpc error: code = NotFound desc = could not find container \"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db\": container with ID starting with d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db not found: ID does not exist" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.077651 4842 scope.go:117] "RemoveContainer" containerID="a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8" Nov 11 13:58:56 crc kubenswrapper[4842]: E1111 13:58:56.077907 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8\": container with ID starting with a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8 not found: ID does not exist" containerID="a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.077925 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8"} err="failed to get container status \"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8\": rpc error: code = NotFound desc = could not find container \"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8\": container with ID starting with a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8 not found: ID does not exist" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.077939 4842 scope.go:117] "RemoveContainer" containerID="d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.078130 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db"} err="failed to get container status \"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db\": rpc error: code = NotFound desc = could not find container \"d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db\": container with ID starting with d7e52e45ed33db00634dcac1cf80274605b6d5254c425153221f1f87bde5a2db not found: ID does not exist" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.078144 4842 
scope.go:117] "RemoveContainer" containerID="a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.078329 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8"} err="failed to get container status \"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8\": rpc error: code = NotFound desc = could not find container \"a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8\": container with ID starting with a4615a847558447da5baccbeac41990c5dc345a90d2c5f973eb6cd6fc0acfed8 not found: ID does not exist" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.098932 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26b458d0-4c81-4730-b4af-fb5e0f26d830" path="/var/lib/kubelet/pods/26b458d0-4c81-4730-b4af-fb5e0f26d830/volumes" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.100460 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="579b2e2f-e46e-467f-b723-5260c79afe21" path="/var/lib/kubelet/pods/579b2e2f-e46e-467f-b723-5260c79afe21/volumes" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.147759 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-combined-ca-bundle\") pod \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.147941 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data-custom\") pod \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.147986 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbclb\" (UniqueName: \"kubernetes.io/projected/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-kube-api-access-xbclb\") pod \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.148010 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-logs\") pod \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.148045 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-scripts\") pod \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.148123 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data\") pod \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.148179 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-etc-machine-id\") pod \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\" (UID: \"34c091e4-e2ab-4abe-95f6-7a8621a75f3f\") " Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.152200 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "34c091e4-e2ab-4abe-95f6-7a8621a75f3f" (UID: "34c091e4-e2ab-4abe-95f6-7a8621a75f3f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.152445 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-logs" (OuterVolumeSpecName: "logs") pod "34c091e4-e2ab-4abe-95f6-7a8621a75f3f" (UID: "34c091e4-e2ab-4abe-95f6-7a8621a75f3f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.156524 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-scripts" (OuterVolumeSpecName: "scripts") pod "34c091e4-e2ab-4abe-95f6-7a8621a75f3f" (UID: "34c091e4-e2ab-4abe-95f6-7a8621a75f3f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.162240 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-kube-api-access-xbclb" (OuterVolumeSpecName: "kube-api-access-xbclb") pod "34c091e4-e2ab-4abe-95f6-7a8621a75f3f" (UID: "34c091e4-e2ab-4abe-95f6-7a8621a75f3f"). InnerVolumeSpecName "kube-api-access-xbclb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.169557 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "34c091e4-e2ab-4abe-95f6-7a8621a75f3f" (UID: "34c091e4-e2ab-4abe-95f6-7a8621a75f3f"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.169606 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f633-account-create-dwqqz"] Nov 11 13:58:56 crc kubenswrapper[4842]: E1111 13:58:56.170017 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api-log" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.170027 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api-log" Nov 11 13:58:56 crc kubenswrapper[4842]: E1111 13:58:56.170054 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.170061 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.170272 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api-log" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.170291 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" containerName="cinder-api" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.170940 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f633-account-create-dwqqz" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.174912 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.196316 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34c091e4-e2ab-4abe-95f6-7a8621a75f3f" (UID: "34c091e4-e2ab-4abe-95f6-7a8621a75f3f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.204248 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f633-account-create-dwqqz"] Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.223900 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data" (OuterVolumeSpecName: "config-data") pod "34c091e4-e2ab-4abe-95f6-7a8621a75f3f" (UID: "34c091e4-e2ab-4abe-95f6-7a8621a75f3f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.249844 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.249871 4842 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.249882 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.249891 4842 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.249900 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.249910 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbclb\" (UniqueName: \"kubernetes.io/projected/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-kube-api-access-xbclb\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.249918 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34c091e4-e2ab-4abe-95f6-7a8621a75f3f-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.309457 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 11 13:58:56 crc kubenswrapper[4842]: W1111 13:58:56.322249 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod762a07fe_46d0_4852_bfef_6ed8007dcd63.slice/crio-a875951e159f6c9b6dd196ce549a731cee26cffb5031822a36387de5049eb4cf WatchSource:0}: Error finding container a875951e159f6c9b6dd196ce549a731cee26cffb5031822a36387de5049eb4cf: Status 404 returned error can't find the container with id a875951e159f6c9b6dd196ce549a731cee26cffb5031822a36387de5049eb4cf Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.348432 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.351049 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdr2g\" (UniqueName: \"kubernetes.io/projected/773d178a-6ce6-4bed-9faf-42147a7ba279-kube-api-access-sdr2g\") pod \"nova-api-f633-account-create-dwqqz\" (UID: \"773d178a-6ce6-4bed-9faf-42147a7ba279\") " pod="openstack/nova-api-f633-account-create-dwqqz" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.360224 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.379683 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.386146 
4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.392487 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.393033 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.393074 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.399795 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.453280 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdr2g\" (UniqueName: \"kubernetes.io/projected/773d178a-6ce6-4bed-9faf-42147a7ba279-kube-api-access-sdr2g\") pod \"nova-api-f633-account-create-dwqqz\" (UID: \"773d178a-6ce6-4bed-9faf-42147a7ba279\") " pod="openstack/nova-api-f633-account-create-dwqqz" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.459830 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.475138 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdr2g\" (UniqueName: \"kubernetes.io/projected/773d178a-6ce6-4bed-9faf-42147a7ba279-kube-api-access-sdr2g\") pod \"nova-api-f633-account-create-dwqqz\" (UID: \"773d178a-6ce6-4bed-9faf-42147a7ba279\") " pod="openstack/nova-api-f633-account-create-dwqqz" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.549905 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f633-account-create-dwqqz" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.555384 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.555426 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-scripts\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.555451 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-config-data\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.555522 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.555638 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-public-tls-certs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.555754 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/317f46b7-2e01-425e-8d9a-7df1c63a0d34-etc-machine-id\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.555797 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/317f46b7-2e01-425e-8d9a-7df1c63a0d34-logs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.556134 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-config-data-custom\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.556176 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zknv\" (UniqueName: \"kubernetes.io/projected/317f46b7-2e01-425e-8d9a-7df1c63a0d34-kube-api-access-8zknv\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658084 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-config-data-custom\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658163 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zknv\" (UniqueName: \"kubernetes.io/projected/317f46b7-2e01-425e-8d9a-7df1c63a0d34-kube-api-access-8zknv\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658259 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658294 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-scripts\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658317 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-config-data\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658343 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658444 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-public-tls-certs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658481 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/317f46b7-2e01-425e-8d9a-7df1c63a0d34-etc-machine-id\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658509 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/317f46b7-2e01-425e-8d9a-7df1c63a0d34-logs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.658900 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/317f46b7-2e01-425e-8d9a-7df1c63a0d34-logs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.663291 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/317f46b7-2e01-425e-8d9a-7df1c63a0d34-etc-machine-id\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.665159 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.667575 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-config-data\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.669237 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-public-tls-certs\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.669365 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-config-data-custom\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.671698 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.673570 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317f46b7-2e01-425e-8d9a-7df1c63a0d34-scripts\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.688617 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zknv\" (UniqueName: \"kubernetes.io/projected/317f46b7-2e01-425e-8d9a-7df1c63a0d34-kube-api-access-8zknv\") pod \"cinder-api-0\" (UID: \"317f46b7-2e01-425e-8d9a-7df1c63a0d34\") " pod="openstack/cinder-api-0" Nov 11 13:58:56 crc kubenswrapper[4842]: I1111 13:58:56.713303 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 11 13:58:57 crc kubenswrapper[4842]: I1111 13:58:57.013210 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"069ae2bc-eee5-4b02-9dd7-602303027ee4","Type":"ContainerStarted","Data":"5df302e046c2d24215e5f3e5fac74993f76f92687ddf0921bc1903901601660e"} Nov 11 13:58:57 crc kubenswrapper[4842]: I1111 13:58:57.016654 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"762a07fe-46d0-4852-bfef-6ed8007dcd63","Type":"ContainerStarted","Data":"a875951e159f6c9b6dd196ce549a731cee26cffb5031822a36387de5049eb4cf"} Nov 11 13:58:57 crc kubenswrapper[4842]: I1111 13:58:57.068407 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f633-account-create-dwqqz"] Nov 11 13:58:57 crc kubenswrapper[4842]: W1111 13:58:57.083182 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod773d178a_6ce6_4bed_9faf_42147a7ba279.slice/crio-04c4854f2f27ca302f3668eb14437099f941d569e2f930725b55ff14c0d0048f WatchSource:0}: Error finding container 04c4854f2f27ca302f3668eb14437099f941d569e2f930725b55ff14c0d0048f: Status 404 returned error can't find the container with id 04c4854f2f27ca302f3668eb14437099f941d569e2f930725b55ff14c0d0048f Nov 11 13:58:57 crc kubenswrapper[4842]: I1111 13:58:57.234249 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 11 13:58:57 crc kubenswrapper[4842]: W1111 13:58:57.244686 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod317f46b7_2e01_425e_8d9a_7df1c63a0d34.slice/crio-6682f8addd2bfca505ca6ea806d3b4e1a8d13be699fb9e8c32ca839d1efae24b WatchSource:0}: Error finding container 6682f8addd2bfca505ca6ea806d3b4e1a8d13be699fb9e8c32ca839d1efae24b: Status 404 returned error can't find the container with id 6682f8addd2bfca505ca6ea806d3b4e1a8d13be699fb9e8c32ca839d1efae24b Nov 11 13:58:57 crc kubenswrapper[4842]: I1111 13:58:57.831532 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.030052 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"762a07fe-46d0-4852-bfef-6ed8007dcd63","Type":"ContainerStarted","Data":"df3149b56660b1c365e9d589ba0d83fbe3f38b19940db46dfcce276685e5dc7d"} Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.030443 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"762a07fe-46d0-4852-bfef-6ed8007dcd63","Type":"ContainerStarted","Data":"e757536e07be2115371d65ae1bfe5423eab6c37ef8a8892b73fc34f9f6e35992"} Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.030578 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.035885 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"069ae2bc-eee5-4b02-9dd7-602303027ee4","Type":"ContainerStarted","Data":"fe652e2ad493b1bbc66f2f56ca0663d9fbf0b9f46113084a6c31e45943cedcc8"} Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.035961 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"069ae2bc-eee5-4b02-9dd7-602303027ee4","Type":"ContainerStarted","Data":"69e63e44478ae09b4b362bfa9cbeb6b1ae54e3d095eadf2ef9555fe98d5d0a73"} Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.037981 4842 generic.go:334] "Generic (PLEG): container finished" podID="773d178a-6ce6-4bed-9faf-42147a7ba279" containerID="8f09f9d4d5d0a93c018bb5bf23ced83c76caf76d4f13d07a4c010f27c3b581ad" exitCode=0 Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.038042 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f633-account-create-dwqqz" event={"ID":"773d178a-6ce6-4bed-9faf-42147a7ba279","Type":"ContainerDied","Data":"8f09f9d4d5d0a93c018bb5bf23ced83c76caf76d4f13d07a4c010f27c3b581ad"} Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.038076 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f633-account-create-dwqqz" event={"ID":"773d178a-6ce6-4bed-9faf-42147a7ba279","Type":"ContainerStarted","Data":"04c4854f2f27ca302f3668eb14437099f941d569e2f930725b55ff14c0d0048f"} Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.042730 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"317f46b7-2e01-425e-8d9a-7df1c63a0d34","Type":"ContainerStarted","Data":"6682f8addd2bfca505ca6ea806d3b4e1a8d13be699fb9e8c32ca839d1efae24b"} Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.059674 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.059656031 podStartE2EDuration="3.059656031s" podCreationTimestamp="2025-11-11 13:58:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:58.048391943 +0000 UTC m=+1748.708681562" watchObservedRunningTime="2025-11-11 13:58:58.059656031 +0000 UTC m=+1748.719945640" Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.102959 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34c091e4-e2ab-4abe-95f6-7a8621a75f3f" path="/var/lib/kubelet/pods/34c091e4-e2ab-4abe-95f6-7a8621a75f3f/volumes" Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.103959 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.118492 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.118474729 podStartE2EDuration="3.118474729s" podCreationTimestamp="2025-11-11 13:58:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:58:58.106203078 +0000 UTC m=+1748.766492697" watchObservedRunningTime="2025-11-11 13:58:58.118474729 +0000 UTC m=+1748.778764348" Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.148277 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.278428 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d9f8cf997-5qgd8"] Nov 11 13:58:58 crc kubenswrapper[4842]: I1111 13:58:58.319551 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerName="dnsmasq-dns" 
containerID="cri-o://4ddd1acfef2d5a921400f75f7739e8971fe70c77e8c38202290e522490071115" gracePeriod=10 Nov 11 13:58:58 crc kubenswrapper[4842]: E1111 13:58:58.692492 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22cbb08d_229b_4cb4_bc84_4d9d22127ee8.slice/crio-conmon-4ddd1acfef2d5a921400f75f7739e8971fe70c77e8c38202290e522490071115.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22cbb08d_229b_4cb4_bc84_4d9d22127ee8.slice/crio-4ddd1acfef2d5a921400f75f7739e8971fe70c77e8c38202290e522490071115.scope\": RecentStats: unable to find data in memory cache]" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.052853 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"317f46b7-2e01-425e-8d9a-7df1c63a0d34","Type":"ContainerStarted","Data":"ca97a54c3ddee392dd2dc6ab0efc60450e6675dc5070b5d5b2f71f26b422a39c"} Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.054741 4842 generic.go:334] "Generic (PLEG): container finished" podID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerID="4ddd1acfef2d5a921400f75f7739e8971fe70c77e8c38202290e522490071115" exitCode=0 Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.054797 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" event={"ID":"22cbb08d-229b-4cb4-bc84-4d9d22127ee8","Type":"ContainerDied","Data":"4ddd1acfef2d5a921400f75f7739e8971fe70c77e8c38202290e522490071115"} Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.056685 4842 generic.go:334] "Generic (PLEG): container finished" podID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerID="22894b63708b39a784ffb4ed886e2a358dcf42c63efd7174e0b7cddd4ff50a37" exitCode=0 Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.056844 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerDied","Data":"22894b63708b39a784ffb4ed886e2a358dcf42c63efd7174e0b7cddd4ff50a37"} Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.057767 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="cinder-scheduler" containerID="cri-o://60ae14b45d1eb4f9fdb276f0faa4c83ce5df64748ad12fb3bd4ffa204e39059b" gracePeriod=30 Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.058686 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="probe" containerID="cri-o://d23a7be85ef816ba8821ef5a2d7793259e369b801a8186e34538a77fd105803e" gracePeriod=30 Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.258955 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.325862 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-config-data\") pod \"0a28beec-6048-4e82-b36c-963af4b4c20d\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.326047 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-scripts\") pod \"0a28beec-6048-4e82-b36c-963af4b4c20d\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.326080 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-run-httpd\") pod \"0a28beec-6048-4e82-b36c-963af4b4c20d\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.326129 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-log-httpd\") pod \"0a28beec-6048-4e82-b36c-963af4b4c20d\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.326185 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-sg-core-conf-yaml\") pod \"0a28beec-6048-4e82-b36c-963af4b4c20d\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.326210 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-combined-ca-bundle\") pod \"0a28beec-6048-4e82-b36c-963af4b4c20d\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.326253 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rptmt\" (UniqueName: \"kubernetes.io/projected/0a28beec-6048-4e82-b36c-963af4b4c20d-kube-api-access-rptmt\") pod \"0a28beec-6048-4e82-b36c-963af4b4c20d\" (UID: \"0a28beec-6048-4e82-b36c-963af4b4c20d\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.327479 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0a28beec-6048-4e82-b36c-963af4b4c20d" (UID: "0a28beec-6048-4e82-b36c-963af4b4c20d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.330622 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0a28beec-6048-4e82-b36c-963af4b4c20d" (UID: "0a28beec-6048-4e82-b36c-963af4b4c20d"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.333072 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a28beec-6048-4e82-b36c-963af4b4c20d-kube-api-access-rptmt" (OuterVolumeSpecName: "kube-api-access-rptmt") pod "0a28beec-6048-4e82-b36c-963af4b4c20d" (UID: "0a28beec-6048-4e82-b36c-963af4b4c20d"). InnerVolumeSpecName "kube-api-access-rptmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.334239 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-scripts" (OuterVolumeSpecName: "scripts") pod "0a28beec-6048-4e82-b36c-963af4b4c20d" (UID: "0a28beec-6048-4e82-b36c-963af4b4c20d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.360319 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0a28beec-6048-4e82-b36c-963af4b4c20d" (UID: "0a28beec-6048-4e82-b36c-963af4b4c20d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.428439 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.428667 4842 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.428770 4842 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a28beec-6048-4e82-b36c-963af4b4c20d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.428844 4842 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.428933 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rptmt\" (UniqueName: \"kubernetes.io/projected/0a28beec-6048-4e82-b36c-963af4b4c20d-kube-api-access-rptmt\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.447567 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f633-account-create-dwqqz" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.448416 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a28beec-6048-4e82-b36c-963af4b4c20d" (UID: "0a28beec-6048-4e82-b36c-963af4b4c20d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.456876 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-config-data" (OuterVolumeSpecName: "config-data") pod "0a28beec-6048-4e82-b36c-963af4b4c20d" (UID: "0a28beec-6048-4e82-b36c-963af4b4c20d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.531326 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdr2g\" (UniqueName: \"kubernetes.io/projected/773d178a-6ce6-4bed-9faf-42147a7ba279-kube-api-access-sdr2g\") pod \"773d178a-6ce6-4bed-9faf-42147a7ba279\" (UID: \"773d178a-6ce6-4bed-9faf-42147a7ba279\") " Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.531976 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.532004 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a28beec-6048-4e82-b36c-963af4b4c20d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.534330 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/773d178a-6ce6-4bed-9faf-42147a7ba279-kube-api-access-sdr2g" (OuterVolumeSpecName: "kube-api-access-sdr2g") pod "773d178a-6ce6-4bed-9faf-42147a7ba279" (UID: "773d178a-6ce6-4bed-9faf-42147a7ba279"). InnerVolumeSpecName "kube-api-access-sdr2g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:58:59 crc kubenswrapper[4842]: I1111 13:58:59.633976 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdr2g\" (UniqueName: \"kubernetes.io/projected/773d178a-6ce6-4bed-9faf-42147a7ba279-kube-api-access-sdr2g\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.079653 4842 generic.go:334] "Generic (PLEG): container finished" podID="063b8da7-3e80-446d-980d-226001c00491" containerID="d23a7be85ef816ba8821ef5a2d7793259e369b801a8186e34538a77fd105803e" exitCode=0 Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.079768 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"063b8da7-3e80-446d-980d-226001c00491","Type":"ContainerDied","Data":"d23a7be85ef816ba8821ef5a2d7793259e369b801a8186e34538a77fd105803e"} Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.083460 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"317f46b7-2e01-425e-8d9a-7df1c63a0d34","Type":"ContainerStarted","Data":"1fcd82ee638d18aa774268ea503945b2e4cc8ced4e39dd0d4c1bb88c6a4b4bda"} Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.084664 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.090874 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" event={"ID":"22cbb08d-229b-4cb4-bc84-4d9d22127ee8","Type":"ContainerDied","Data":"030c13ff1301519c3f573f5410aa11de76776ba41a640ac3b5ad0c51dc2da7a8"} Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.090920 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="030c13ff1301519c3f573f5410aa11de76776ba41a640ac3b5ad0c51dc2da7a8" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.101404 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a28beec-6048-4e82-b36c-963af4b4c20d","Type":"ContainerDied","Data":"8979af75d9a49d223baf79a5c58bbb76254596a6a7c078c3dcbfdf65f4a8f3da"} Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.101463 4842 scope.go:117] "RemoveContainer" containerID="a946e415b396f0b1e1cda81b503203022611f0dba648e9a52a82832afcc1cef4" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.101634 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.111794 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.11177729 podStartE2EDuration="4.11177729s" podCreationTimestamp="2025-11-11 13:58:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:00.106300786 +0000 UTC m=+1750.766590405" watchObservedRunningTime="2025-11-11 13:59:00.11177729 +0000 UTC m=+1750.772066919" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.112964 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f633-account-create-dwqqz" event={"ID":"773d178a-6ce6-4bed-9faf-42147a7ba279","Type":"ContainerDied","Data":"04c4854f2f27ca302f3668eb14437099f941d569e2f930725b55ff14c0d0048f"} Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.113010 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04c4854f2f27ca302f3668eb14437099f941d569e2f930725b55ff14c0d0048f" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.113084 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f633-account-create-dwqqz" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.177145 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.181575 4842 scope.go:117] "RemoveContainer" containerID="4c832a37ec3d0fef17d0c74a99ff745ff7816c6eea0495c34f12dc763626b8d1" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.208909 4842 scope.go:117] "RemoveContainer" containerID="f418427b529d14969dd1fc345becc87db987ca455586276c9a57dabebd5aa9df" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.250897 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.255430 4842 scope.go:117] "RemoveContainer" containerID="22894b63708b39a784ffb4ed886e2a358dcf42c63efd7174e0b7cddd4ff50a37" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.262236 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.267345 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:00 crc kubenswrapper[4842]: E1111 13:59:00.267847 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerName="init" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.267862 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerName="init" Nov 11 13:59:00 crc kubenswrapper[4842]: E1111 13:59:00.267875 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="773d178a-6ce6-4bed-9faf-42147a7ba279" containerName="mariadb-account-create" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.267882 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="773d178a-6ce6-4bed-9faf-42147a7ba279" containerName="mariadb-account-create" Nov 11 13:59:00 crc kubenswrapper[4842]: E1111 13:59:00.267913 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="proxy-httpd" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.267921 
4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="proxy-httpd" Nov 11 13:59:00 crc kubenswrapper[4842]: E1111 13:59:00.267937 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-central-agent" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.267946 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-central-agent" Nov 11 13:59:00 crc kubenswrapper[4842]: E1111 13:59:00.267956 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerName="dnsmasq-dns" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.267963 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerName="dnsmasq-dns" Nov 11 13:59:00 crc kubenswrapper[4842]: E1111 13:59:00.267974 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="sg-core" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.267980 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="sg-core" Nov 11 13:59:00 crc kubenswrapper[4842]: E1111 13:59:00.267998 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-notification-agent" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.268005 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-notification-agent" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.268294 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerName="dnsmasq-dns" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.268315 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="proxy-httpd" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.268335 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="773d178a-6ce6-4bed-9faf-42147a7ba279" containerName="mariadb-account-create" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.268353 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="sg-core" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.268371 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-notification-agent" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.268381 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" containerName="ceilometer-central-agent" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.271897 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.273904 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.283896 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.284245 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.310660 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.344935 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfcp9\" (UniqueName: \"kubernetes.io/projected/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-kube-api-access-bfcp9\") pod \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.345008 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-svc\") pod \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.345046 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-swift-storage-0\") pod \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.345211 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-nb\") pod \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.345235 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-sb\") pod \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.345283 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-config\") pod \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\" (UID: \"22cbb08d-229b-4cb4-bc84-4d9d22127ee8\") " Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.375415 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-kube-api-access-bfcp9" (OuterVolumeSpecName: "kube-api-access-bfcp9") pod "22cbb08d-229b-4cb4-bc84-4d9d22127ee8" (UID: "22cbb08d-229b-4cb4-bc84-4d9d22127ee8"). InnerVolumeSpecName "kube-api-access-bfcp9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.404150 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "22cbb08d-229b-4cb4-bc84-4d9d22127ee8" (UID: "22cbb08d-229b-4cb4-bc84-4d9d22127ee8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.427793 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "22cbb08d-229b-4cb4-bc84-4d9d22127ee8" (UID: "22cbb08d-229b-4cb4-bc84-4d9d22127ee8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.438288 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "22cbb08d-229b-4cb4-bc84-4d9d22127ee8" (UID: "22cbb08d-229b-4cb4-bc84-4d9d22127ee8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.447209 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-run-httpd\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.447275 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-scripts\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.447293 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.447355 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-config-data\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.447419 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.447442 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6z24v\" (UniqueName: \"kubernetes.io/projected/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-kube-api-access-6z24v\") pod \"ceilometer-0\" (UID: 
\"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.448396 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-log-httpd\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.448632 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfcp9\" (UniqueName: \"kubernetes.io/projected/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-kube-api-access-bfcp9\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.448661 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.448672 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.448681 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.451163 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "22cbb08d-229b-4cb4-bc84-4d9d22127ee8" (UID: "22cbb08d-229b-4cb4-bc84-4d9d22127ee8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.456756 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-config" (OuterVolumeSpecName: "config") pod "22cbb08d-229b-4cb4-bc84-4d9d22127ee8" (UID: "22cbb08d-229b-4cb4-bc84-4d9d22127ee8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550650 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-config-data\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550729 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550758 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6z24v\" (UniqueName: \"kubernetes.io/projected/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-kube-api-access-6z24v\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550826 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-log-httpd\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550862 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-run-httpd\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550887 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-scripts\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550910 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550974 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.550987 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/22cbb08d-229b-4cb4-bc84-4d9d22127ee8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.551973 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-run-httpd\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.552265 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-log-httpd\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.555961 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.559738 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.560447 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-scripts\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.568591 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z24v\" (UniqueName: \"kubernetes.io/projected/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-kube-api-access-6z24v\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.570903 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-config-data\") pod \"ceilometer-0\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " pod="openstack/ceilometer-0" Nov 11 13:59:00 crc kubenswrapper[4842]: I1111 13:59:00.616620 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.104675 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:01 crc kubenswrapper[4842]: W1111 13:59:01.106998 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38ec4098_f6bc_457f_a9b8_cbc0b3cb49cb.slice/crio-b5ff28e8efbbfb01a041aaa5bd964f172755e89a904b85ffb015ccd035584d37 WatchSource:0}: Error finding container b5ff28e8efbbfb01a041aaa5bd964f172755e89a904b85ffb015ccd035584d37: Status 404 returned error can't find the container with id b5ff28e8efbbfb01a041aaa5bd964f172755e89a904b85ffb015ccd035584d37 Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.126344 4842 generic.go:334] "Generic (PLEG): container finished" podID="063b8da7-3e80-446d-980d-226001c00491" containerID="60ae14b45d1eb4f9fdb276f0faa4c83ce5df64748ad12fb3bd4ffa204e39059b" exitCode=0 Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.126467 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"063b8da7-3e80-446d-980d-226001c00491","Type":"ContainerDied","Data":"60ae14b45d1eb4f9fdb276f0faa4c83ce5df64748ad12fb3bd4ffa204e39059b"} Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.128326 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.130469 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerStarted","Data":"b5ff28e8efbbfb01a041aaa5bd964f172755e89a904b85ffb015ccd035584d37"} Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.173857 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d9f8cf997-5qgd8"] Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.183287 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d9f8cf997-5qgd8"] Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.313678 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.471060 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-combined-ca-bundle\") pod \"063b8da7-3e80-446d-980d-226001c00491\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.471394 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-scripts\") pod \"063b8da7-3e80-446d-980d-226001c00491\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.471458 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data-custom\") pod \"063b8da7-3e80-446d-980d-226001c00491\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.471547 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/063b8da7-3e80-446d-980d-226001c00491-etc-machine-id\") pod \"063b8da7-3e80-446d-980d-226001c00491\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.471679 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data\") pod \"063b8da7-3e80-446d-980d-226001c00491\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.471737 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njnq8\" (UniqueName: \"kubernetes.io/projected/063b8da7-3e80-446d-980d-226001c00491-kube-api-access-njnq8\") pod \"063b8da7-3e80-446d-980d-226001c00491\" (UID: \"063b8da7-3e80-446d-980d-226001c00491\") " Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.471846 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/063b8da7-3e80-446d-980d-226001c00491-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "063b8da7-3e80-446d-980d-226001c00491" (UID: "063b8da7-3e80-446d-980d-226001c00491"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.472137 4842 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/063b8da7-3e80-446d-980d-226001c00491-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.476285 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "063b8da7-3e80-446d-980d-226001c00491" (UID: "063b8da7-3e80-446d-980d-226001c00491"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.476334 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-scripts" (OuterVolumeSpecName: "scripts") pod "063b8da7-3e80-446d-980d-226001c00491" (UID: "063b8da7-3e80-446d-980d-226001c00491"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.476475 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/063b8da7-3e80-446d-980d-226001c00491-kube-api-access-njnq8" (OuterVolumeSpecName: "kube-api-access-njnq8") pod "063b8da7-3e80-446d-980d-226001c00491" (UID: "063b8da7-3e80-446d-980d-226001c00491"). InnerVolumeSpecName "kube-api-access-njnq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.565127 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "063b8da7-3e80-446d-980d-226001c00491" (UID: "063b8da7-3e80-446d-980d-226001c00491"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.573776 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njnq8\" (UniqueName: \"kubernetes.io/projected/063b8da7-3e80-446d-980d-226001c00491-kube-api-access-njnq8\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.573809 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.573821 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.573833 4842 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.633314 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data" (OuterVolumeSpecName: "config-data") pod "063b8da7-3e80-446d-980d-226001c00491" (UID: "063b8da7-3e80-446d-980d-226001c00491"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:01 crc kubenswrapper[4842]: I1111 13:59:01.675527 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/063b8da7-3e80-446d-980d-226001c00491-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.075099 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a28beec-6048-4e82-b36c-963af4b4c20d" path="/var/lib/kubelet/pods/0a28beec-6048-4e82-b36c-963af4b4c20d/volumes" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.075940 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" path="/var/lib/kubelet/pods/22cbb08d-229b-4cb4-bc84-4d9d22127ee8/volumes" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.140774 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerStarted","Data":"95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8"} Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.141884 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerStarted","Data":"71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5"} Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.143283 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"063b8da7-3e80-446d-980d-226001c00491","Type":"ContainerDied","Data":"228800069bc35bcee224577f2aa2cbc16aeacd40e9f64bd7159a7108f377481e"} Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.143346 4842 scope.go:117] "RemoveContainer" containerID="d23a7be85ef816ba8821ef5a2d7793259e369b801a8186e34538a77fd105803e" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.144271 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.252964 4842 scope.go:117] "RemoveContainer" containerID="60ae14b45d1eb4f9fdb276f0faa4c83ce5df64748ad12fb3bd4ffa204e39059b" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.281093 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.295214 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.305618 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:59:02 crc kubenswrapper[4842]: E1111 13:59:02.306361 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="probe" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.306476 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="probe" Nov 11 13:59:02 crc kubenswrapper[4842]: E1111 13:59:02.306576 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="cinder-scheduler" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.306653 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="cinder-scheduler" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.306979 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="cinder-scheduler" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.307069 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="063b8da7-3e80-446d-980d-226001c00491" containerName="probe" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.308673 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.313929 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.324208 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.388437 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7q68\" (UniqueName: \"kubernetes.io/projected/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-kube-api-access-l7q68\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.388604 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.388631 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-config-data\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.388653 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-scripts\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.388671 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.388725 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.490397 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.490919 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-config-data\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.490553 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.491240 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-scripts\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.491373 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.491543 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.491722 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7q68\" (UniqueName: \"kubernetes.io/projected/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-kube-api-access-l7q68\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.495006 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-scripts\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.495383 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.498138 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-config-data\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.499658 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.510759 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7q68\" (UniqueName: \"kubernetes.io/projected/5eec4bcc-3000-4c55-99ea-0bee19c6fa86-kube-api-access-l7q68\") pod \"cinder-scheduler-0\" (UID: \"5eec4bcc-3000-4c55-99ea-0bee19c6fa86\") " pod="openstack/cinder-scheduler-0" Nov 
11 13:59:02 crc kubenswrapper[4842]: I1111 13:59:02.645709 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 11 13:59:03 crc kubenswrapper[4842]: I1111 13:59:03.161781 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 11 13:59:03 crc kubenswrapper[4842]: I1111 13:59:03.172632 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerStarted","Data":"371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89"} Nov 11 13:59:03 crc kubenswrapper[4842]: W1111 13:59:03.173447 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5eec4bcc_3000_4c55_99ea_0bee19c6fa86.slice/crio-d91646299c635be4059f798ac416d8cd320c5840825db9e26276ebcbde2ba516 WatchSource:0}: Error finding container d91646299c635be4059f798ac416d8cd320c5840825db9e26276ebcbde2ba516: Status 404 returned error can't find the container with id d91646299c635be4059f798ac416d8cd320c5840825db9e26276ebcbde2ba516 Nov 11 13:59:03 crc kubenswrapper[4842]: I1111 13:59:03.180702 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:03 crc kubenswrapper[4842]: I1111 13:59:03.904979 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6cbfd54f69-xg8r8" Nov 11 13:59:03 crc kubenswrapper[4842]: I1111 13:59:03.968061 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-74c9bc975b-pgfbr"] Nov 11 13:59:03 crc kubenswrapper[4842]: I1111 13:59:03.968358 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-74c9bc975b-pgfbr" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-api" containerID="cri-o://0b61c81c63b4f4d7d8c181c198a4701d2ac0bd48becea2ff040a3f2de345e58f" gracePeriod=30 Nov 11 13:59:03 crc kubenswrapper[4842]: I1111 13:59:03.968479 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-74c9bc975b-pgfbr" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-httpd" containerID="cri-o://ebacd2dbfde027adcc40b38c5a299f7647578ccbc145b7c95148214acf782986" gracePeriod=30 Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.080819 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="063b8da7-3e80-446d-980d-226001c00491" path="/var/lib/kubelet/pods/063b8da7-3e80-446d-980d-226001c00491/volumes" Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.204283 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerStarted","Data":"40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7"} Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.204365 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-central-agent" containerID="cri-o://71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5" gracePeriod=30 Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.204425 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.204449 4842 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/ceilometer-0" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="proxy-httpd" containerID="cri-o://40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7" gracePeriod=30 Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.204486 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="sg-core" containerID="cri-o://371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89" gracePeriod=30 Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.204517 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-notification-agent" containerID="cri-o://95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8" gracePeriod=30 Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.212830 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5eec4bcc-3000-4c55-99ea-0bee19c6fa86","Type":"ContainerStarted","Data":"432a7949d777cace2e53907e9d5359d75b48ba6c4c4f6d8e18d4088d6982421b"} Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.212867 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5eec4bcc-3000-4c55-99ea-0bee19c6fa86","Type":"ContainerStarted","Data":"d91646299c635be4059f798ac416d8cd320c5840825db9e26276ebcbde2ba516"} Nov 11 13:59:04 crc kubenswrapper[4842]: I1111 13:59:04.229808 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.6484523819999999 podStartE2EDuration="4.229793095s" podCreationTimestamp="2025-11-11 13:59:00 +0000 UTC" firstStartedPulling="2025-11-11 13:59:01.109788538 +0000 UTC m=+1751.770078157" lastFinishedPulling="2025-11-11 13:59:03.691129251 +0000 UTC m=+1754.351418870" observedRunningTime="2025-11-11 13:59:04.227888754 +0000 UTC m=+1754.888178373" watchObservedRunningTime="2025-11-11 13:59:04.229793095 +0000 UTC m=+1754.890082714" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.097466 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5d9f8cf997-5qgd8" podUID="22cbb08d-229b-4cb4-bc84-4d9d22127ee8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.177:5353: i/o timeout" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.224262 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5eec4bcc-3000-4c55-99ea-0bee19c6fa86","Type":"ContainerStarted","Data":"6d61197efe32a11ab3d929947843d4e3624bff0a0e1d8afd279564bdf35b031d"} Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.228682 4842 generic.go:334] "Generic (PLEG): container finished" podID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerID="371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89" exitCode=2 Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.228710 4842 generic.go:334] "Generic (PLEG): container finished" podID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerID="95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8" exitCode=0 Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.228746 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerDied","Data":"371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89"} Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.228770 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerDied","Data":"95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8"} Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.230555 4842 generic.go:334] "Generic (PLEG): container finished" podID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerID="ebacd2dbfde027adcc40b38c5a299f7647578ccbc145b7c95148214acf782986" exitCode=0 Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.230581 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74c9bc975b-pgfbr" event={"ID":"dbdf9201-7e9d-4b1d-a890-4780e817d589","Type":"ContainerDied","Data":"ebacd2dbfde027adcc40b38c5a299f7647578ccbc145b7c95148214acf782986"} Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.250862 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.250845186 podStartE2EDuration="3.250845186s" podCreationTimestamp="2025-11-11 13:59:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:05.247548301 +0000 UTC m=+1755.907837920" watchObservedRunningTime="2025-11-11 13:59:05.250845186 +0000 UTC m=+1755.911134805" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.604820 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.605170 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.615327 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.615372 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.643712 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.653804 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.665217 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 11 13:59:05 crc kubenswrapper[4842]: I1111 13:59:05.671653 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.253932 4842 generic.go:334] "Generic (PLEG): container finished" podID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerID="0b61c81c63b4f4d7d8c181c198a4701d2ac0bd48becea2ff040a3f2de345e58f" exitCode=0 Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.254031 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74c9bc975b-pgfbr" 
event={"ID":"dbdf9201-7e9d-4b1d-a890-4780e817d589","Type":"ContainerDied","Data":"0b61c81c63b4f4d7d8c181c198a4701d2ac0bd48becea2ff040a3f2de345e58f"} Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.255020 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.255064 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.255076 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.255086 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.377766 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.385959 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-1ecb-account-create-crxmr"] Nov 11 13:59:06 crc kubenswrapper[4842]: E1111 13:59:06.386357 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-api" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.386374 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-api" Nov 11 13:59:06 crc kubenswrapper[4842]: E1111 13:59:06.386419 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-httpd" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.386426 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-httpd" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.386588 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-api" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.386610 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" containerName="neutron-httpd" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.387226 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1ecb-account-create-crxmr" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.388770 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.398904 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1ecb-account-create-crxmr"] Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.475084 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-ovndb-tls-certs\") pod \"dbdf9201-7e9d-4b1d-a890-4780e817d589\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.475261 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4bkp\" (UniqueName: \"kubernetes.io/projected/dbdf9201-7e9d-4b1d-a890-4780e817d589-kube-api-access-s4bkp\") pod \"dbdf9201-7e9d-4b1d-a890-4780e817d589\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.475318 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-httpd-config\") pod \"dbdf9201-7e9d-4b1d-a890-4780e817d589\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.475355 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-config\") pod \"dbdf9201-7e9d-4b1d-a890-4780e817d589\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.475445 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-combined-ca-bundle\") pod \"dbdf9201-7e9d-4b1d-a890-4780e817d589\" (UID: \"dbdf9201-7e9d-4b1d-a890-4780e817d589\") " Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.475813 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdxfd\" (UniqueName: \"kubernetes.io/projected/64d246d4-7202-4b17-9d4e-febbf9bbfff7-kube-api-access-sdxfd\") pod \"nova-cell0-1ecb-account-create-crxmr\" (UID: \"64d246d4-7202-4b17-9d4e-febbf9bbfff7\") " pod="openstack/nova-cell0-1ecb-account-create-crxmr" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.483761 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbdf9201-7e9d-4b1d-a890-4780e817d589-kube-api-access-s4bkp" (OuterVolumeSpecName: "kube-api-access-s4bkp") pod "dbdf9201-7e9d-4b1d-a890-4780e817d589" (UID: "dbdf9201-7e9d-4b1d-a890-4780e817d589"). InnerVolumeSpecName "kube-api-access-s4bkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.484180 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "dbdf9201-7e9d-4b1d-a890-4780e817d589" (UID: "dbdf9201-7e9d-4b1d-a890-4780e817d589"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.524068 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-config" (OuterVolumeSpecName: "config") pod "dbdf9201-7e9d-4b1d-a890-4780e817d589" (UID: "dbdf9201-7e9d-4b1d-a890-4780e817d589"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.535254 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dbdf9201-7e9d-4b1d-a890-4780e817d589" (UID: "dbdf9201-7e9d-4b1d-a890-4780e817d589"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.573325 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "dbdf9201-7e9d-4b1d-a890-4780e817d589" (UID: "dbdf9201-7e9d-4b1d-a890-4780e817d589"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.575144 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-5382-account-create-kkn8k"] Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.576756 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5382-account-create-kkn8k" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.579018 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdxfd\" (UniqueName: \"kubernetes.io/projected/64d246d4-7202-4b17-9d4e-febbf9bbfff7-kube-api-access-sdxfd\") pod \"nova-cell0-1ecb-account-create-crxmr\" (UID: \"64d246d4-7202-4b17-9d4e-febbf9bbfff7\") " pod="openstack/nova-cell0-1ecb-account-create-crxmr" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.579146 4842 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.579168 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4bkp\" (UniqueName: \"kubernetes.io/projected/dbdf9201-7e9d-4b1d-a890-4780e817d589-kube-api-access-s4bkp\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.579183 4842 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.579194 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.579205 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbdf9201-7e9d-4b1d-a890-4780e817d589-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.579563 4842 reflector.go:368] 
Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.609735 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdxfd\" (UniqueName: \"kubernetes.io/projected/64d246d4-7202-4b17-9d4e-febbf9bbfff7-kube-api-access-sdxfd\") pod \"nova-cell0-1ecb-account-create-crxmr\" (UID: \"64d246d4-7202-4b17-9d4e-febbf9bbfff7\") " pod="openstack/nova-cell0-1ecb-account-create-crxmr" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.612506 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5382-account-create-kkn8k"] Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.684895 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfkjj\" (UniqueName: \"kubernetes.io/projected/66fea681-2020-41b9-be32-04f0b846e302-kube-api-access-cfkjj\") pod \"nova-cell1-5382-account-create-kkn8k\" (UID: \"66fea681-2020-41b9-be32-04f0b846e302\") " pod="openstack/nova-cell1-5382-account-create-kkn8k" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.707538 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1ecb-account-create-crxmr" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.786733 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfkjj\" (UniqueName: \"kubernetes.io/projected/66fea681-2020-41b9-be32-04f0b846e302-kube-api-access-cfkjj\") pod \"nova-cell1-5382-account-create-kkn8k\" (UID: \"66fea681-2020-41b9-be32-04f0b846e302\") " pod="openstack/nova-cell1-5382-account-create-kkn8k" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.811134 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfkjj\" (UniqueName: \"kubernetes.io/projected/66fea681-2020-41b9-be32-04f0b846e302-kube-api-access-cfkjj\") pod \"nova-cell1-5382-account-create-kkn8k\" (UID: \"66fea681-2020-41b9-be32-04f0b846e302\") " pod="openstack/nova-cell1-5382-account-create-kkn8k" Nov 11 13:59:06 crc kubenswrapper[4842]: I1111 13:59:06.931598 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5382-account-create-kkn8k" Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.292066 4842 generic.go:334] "Generic (PLEG): container finished" podID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerID="71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5" exitCode=0 Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.293033 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerDied","Data":"71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5"} Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.309448 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-74c9bc975b-pgfbr" Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.310169 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74c9bc975b-pgfbr" event={"ID":"dbdf9201-7e9d-4b1d-a890-4780e817d589","Type":"ContainerDied","Data":"b35f9c9810ac691025f3a1759db198999c210d20e7751ef245669628f931bb8a"} Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.310210 4842 scope.go:117] "RemoveContainer" containerID="ebacd2dbfde027adcc40b38c5a299f7647578ccbc145b7c95148214acf782986" Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.333574 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1ecb-account-create-crxmr"] Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.358774 4842 scope.go:117] "RemoveContainer" containerID="0b61c81c63b4f4d7d8c181c198a4701d2ac0bd48becea2ff040a3f2de345e58f" Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.364846 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-74c9bc975b-pgfbr"] Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.376281 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-74c9bc975b-pgfbr"] Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.593125 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5382-account-create-kkn8k"] Nov 11 13:59:07 crc kubenswrapper[4842]: I1111 13:59:07.646302 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.059964 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:59:08 crc kubenswrapper[4842]: E1111 13:59:08.060301 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.072372 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbdf9201-7e9d-4b1d-a890-4780e817d589" path="/var/lib/kubelet/pods/dbdf9201-7e9d-4b1d-a890-4780e817d589/volumes" Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.331254 4842 generic.go:334] "Generic (PLEG): container finished" podID="66fea681-2020-41b9-be32-04f0b846e302" containerID="ddad56ec3eb384de6a1cd53346451ed25181a486cac02184413c318b6d4b1f39" exitCode=0 Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.331325 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5382-account-create-kkn8k" event={"ID":"66fea681-2020-41b9-be32-04f0b846e302","Type":"ContainerDied","Data":"ddad56ec3eb384de6a1cd53346451ed25181a486cac02184413c318b6d4b1f39"} Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.331545 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5382-account-create-kkn8k" event={"ID":"66fea681-2020-41b9-be32-04f0b846e302","Type":"ContainerStarted","Data":"a00a9ed75bffbe172c6f6d8dcc57e80832eab3df0a5611731c5aeaee61ca44d1"} Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.337417 4842 generic.go:334] "Generic (PLEG): container finished" 
podID="64d246d4-7202-4b17-9d4e-febbf9bbfff7" containerID="c38bc0cb8d91fe393ee940ecd1c837b65385ed477d6b18e59f0cc0229bcaf6cc" exitCode=0 Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.337570 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.337584 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.337687 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1ecb-account-create-crxmr" event={"ID":"64d246d4-7202-4b17-9d4e-febbf9bbfff7","Type":"ContainerDied","Data":"c38bc0cb8d91fe393ee940ecd1c837b65385ed477d6b18e59f0cc0229bcaf6cc"} Nov 11 13:59:08 crc kubenswrapper[4842]: I1111 13:59:08.337734 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1ecb-account-create-crxmr" event={"ID":"64d246d4-7202-4b17-9d4e-febbf9bbfff7","Type":"ContainerStarted","Data":"85c964f656a8a3e42471609d90d195c8514a7ade654c9ad98caf92eb18cf0e24"} Nov 11 13:59:09 crc kubenswrapper[4842]: I1111 13:59:09.338995 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 11 13:59:09 crc kubenswrapper[4842]: I1111 13:59:09.792263 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:09 crc kubenswrapper[4842]: I1111 13:59:09.792394 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:59:09 crc kubenswrapper[4842]: I1111 13:59:09.794473 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 11 13:59:09 crc kubenswrapper[4842]: I1111 13:59:09.851168 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 11 13:59:09 crc kubenswrapper[4842]: I1111 13:59:09.851584 4842 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 11 13:59:09 crc kubenswrapper[4842]: I1111 13:59:09.915790 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.003614 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5382-account-create-kkn8k" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.014877 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1ecb-account-create-crxmr" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.173069 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfkjj\" (UniqueName: \"kubernetes.io/projected/66fea681-2020-41b9-be32-04f0b846e302-kube-api-access-cfkjj\") pod \"66fea681-2020-41b9-be32-04f0b846e302\" (UID: \"66fea681-2020-41b9-be32-04f0b846e302\") " Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.173286 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdxfd\" (UniqueName: \"kubernetes.io/projected/64d246d4-7202-4b17-9d4e-febbf9bbfff7-kube-api-access-sdxfd\") pod \"64d246d4-7202-4b17-9d4e-febbf9bbfff7\" (UID: \"64d246d4-7202-4b17-9d4e-febbf9bbfff7\") " Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.180790 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66fea681-2020-41b9-be32-04f0b846e302-kube-api-access-cfkjj" (OuterVolumeSpecName: "kube-api-access-cfkjj") pod "66fea681-2020-41b9-be32-04f0b846e302" (UID: "66fea681-2020-41b9-be32-04f0b846e302"). InnerVolumeSpecName "kube-api-access-cfkjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.188568 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64d246d4-7202-4b17-9d4e-febbf9bbfff7-kube-api-access-sdxfd" (OuterVolumeSpecName: "kube-api-access-sdxfd") pod "64d246d4-7202-4b17-9d4e-febbf9bbfff7" (UID: "64d246d4-7202-4b17-9d4e-febbf9bbfff7"). InnerVolumeSpecName "kube-api-access-sdxfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.276121 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdxfd\" (UniqueName: \"kubernetes.io/projected/64d246d4-7202-4b17-9d4e-febbf9bbfff7-kube-api-access-sdxfd\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.276156 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfkjj\" (UniqueName: \"kubernetes.io/projected/66fea681-2020-41b9-be32-04f0b846e302-kube-api-access-cfkjj\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.357522 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5382-account-create-kkn8k" event={"ID":"66fea681-2020-41b9-be32-04f0b846e302","Type":"ContainerDied","Data":"a00a9ed75bffbe172c6f6d8dcc57e80832eab3df0a5611731c5aeaee61ca44d1"} Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.357572 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a00a9ed75bffbe172c6f6d8dcc57e80832eab3df0a5611731c5aeaee61ca44d1" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.357646 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-5382-account-create-kkn8k" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.366567 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1ecb-account-create-crxmr" event={"ID":"64d246d4-7202-4b17-9d4e-febbf9bbfff7","Type":"ContainerDied","Data":"85c964f656a8a3e42471609d90d195c8514a7ade654c9ad98caf92eb18cf0e24"} Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.366605 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85c964f656a8a3e42471609d90d195c8514a7ade654c9ad98caf92eb18cf0e24" Nov 11 13:59:10 crc kubenswrapper[4842]: I1111 13:59:10.366794 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1ecb-account-create-crxmr" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.521144 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cdldt"] Nov 11 13:59:11 crc kubenswrapper[4842]: E1111 13:59:11.521705 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66fea681-2020-41b9-be32-04f0b846e302" containerName="mariadb-account-create" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.521717 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="66fea681-2020-41b9-be32-04f0b846e302" containerName="mariadb-account-create" Nov 11 13:59:11 crc kubenswrapper[4842]: E1111 13:59:11.521738 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64d246d4-7202-4b17-9d4e-febbf9bbfff7" containerName="mariadb-account-create" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.521743 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="64d246d4-7202-4b17-9d4e-febbf9bbfff7" containerName="mariadb-account-create" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.521940 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="66fea681-2020-41b9-be32-04f0b846e302" containerName="mariadb-account-create" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.521958 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="64d246d4-7202-4b17-9d4e-febbf9bbfff7" containerName="mariadb-account-create" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.522601 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: W1111 13:59:11.524523 4842 reflector.go:561] object-"openstack"/"nova-nova-dockercfg-b2mm6": failed to list *v1.Secret: secrets "nova-nova-dockercfg-b2mm6" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 11 13:59:11 crc kubenswrapper[4842]: E1111 13:59:11.524579 4842 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"nova-nova-dockercfg-b2mm6\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"nova-nova-dockercfg-b2mm6\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 11 13:59:11 crc kubenswrapper[4842]: W1111 13:59:11.525111 4842 reflector.go:561] object-"openstack"/"nova-cell0-conductor-scripts": failed to list *v1.Secret: secrets "nova-cell0-conductor-scripts" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 11 13:59:11 crc kubenswrapper[4842]: E1111 13:59:11.525155 4842 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"nova-cell0-conductor-scripts\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"nova-cell0-conductor-scripts\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.525229 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.547901 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cdldt"] Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.703358 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.703403 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f27d8\" (UniqueName: \"kubernetes.io/projected/cb167dbe-4958-4dff-8389-4fcd23764a9c-kube-api-access-f27d8\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.703483 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-scripts\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.703535 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-config-data\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.804881 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.805225 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f27d8\" (UniqueName: \"kubernetes.io/projected/cb167dbe-4958-4dff-8389-4fcd23764a9c-kube-api-access-f27d8\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.805351 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-scripts\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.805417 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-config-data\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.813157 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-config-data\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.816081 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:11 crc kubenswrapper[4842]: I1111 13:59:11.825712 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f27d8\" (UniqueName: \"kubernetes.io/projected/cb167dbe-4958-4dff-8389-4fcd23764a9c-kube-api-access-f27d8\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:12 crc kubenswrapper[4842]: I1111 13:59:12.388207 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 11 13:59:12 crc kubenswrapper[4842]: I1111 13:59:12.406745 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-scripts\") pod \"nova-cell0-conductor-db-sync-cdldt\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " pod="openstack/nova-cell0-conductor-db-sync-cdldt" 
Nov 11 13:59:12 crc kubenswrapper[4842]: I1111 13:59:12.451732 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-b2mm6" Nov 11 13:59:12 crc kubenswrapper[4842]: I1111 13:59:12.459924 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:12 crc kubenswrapper[4842]: I1111 13:59:12.873738 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 11 13:59:12 crc kubenswrapper[4842]: W1111 13:59:12.939309 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb167dbe_4958_4dff_8389_4fcd23764a9c.slice/crio-b43949bbb446658797392a2c5e87bc31b1e68d6cd7dc75814f99c5239a4856bb WatchSource:0}: Error finding container b43949bbb446658797392a2c5e87bc31b1e68d6cd7dc75814f99c5239a4856bb: Status 404 returned error can't find the container with id b43949bbb446658797392a2c5e87bc31b1e68d6cd7dc75814f99c5239a4856bb Nov 11 13:59:12 crc kubenswrapper[4842]: I1111 13:59:12.955081 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cdldt"] Nov 11 13:59:13 crc kubenswrapper[4842]: I1111 13:59:13.408994 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cdldt" event={"ID":"cb167dbe-4958-4dff-8389-4fcd23764a9c","Type":"ContainerStarted","Data":"b43949bbb446658797392a2c5e87bc31b1e68d6cd7dc75814f99c5239a4856bb"} Nov 11 13:59:22 crc kubenswrapper[4842]: I1111 13:59:22.492317 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cdldt" event={"ID":"cb167dbe-4958-4dff-8389-4fcd23764a9c","Type":"ContainerStarted","Data":"1cdfbb355e8a2da5d86b3e9dfe3a1c82316efab30a662bd060764f099610aa02"} Nov 11 13:59:22 crc kubenswrapper[4842]: I1111 13:59:22.519978 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-cdldt" podStartSLOduration=2.33798185 podStartE2EDuration="11.519942096s" podCreationTimestamp="2025-11-11 13:59:11 +0000 UTC" firstStartedPulling="2025-11-11 13:59:12.940711095 +0000 UTC m=+1763.601000714" lastFinishedPulling="2025-11-11 13:59:22.122671341 +0000 UTC m=+1772.782960960" observedRunningTime="2025-11-11 13:59:22.505622051 +0000 UTC m=+1773.165911670" watchObservedRunningTime="2025-11-11 13:59:22.519942096 +0000 UTC m=+1773.180231725" Nov 11 13:59:23 crc kubenswrapper[4842]: I1111 13:59:23.058980 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:59:23 crc kubenswrapper[4842]: E1111 13:59:23.059284 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:59:24 crc kubenswrapper[4842]: I1111 13:59:24.046301 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:59:24 crc kubenswrapper[4842]: I1111 13:59:24.046553 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" 
podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" containerID="cri-o://ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d" gracePeriod=30 Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.027089 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.040846 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-combined-ca-bundle\") pod \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.040888 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-config-data\") pod \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.040967 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqlgg\" (UniqueName: \"kubernetes.io/projected/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-kube-api-access-zqlgg\") pod \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.041025 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-custom-prometheus-ca\") pod \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.041045 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-logs\") pod \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\" (UID: \"643d2817-dea0-4bc0-81b1-6f83eec1d4ca\") " Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.041844 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-logs" (OuterVolumeSpecName: "logs") pod "643d2817-dea0-4bc0-81b1-6f83eec1d4ca" (UID: "643d2817-dea0-4bc0-81b1-6f83eec1d4ca"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.051419 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-kube-api-access-zqlgg" (OuterVolumeSpecName: "kube-api-access-zqlgg") pod "643d2817-dea0-4bc0-81b1-6f83eec1d4ca" (UID: "643d2817-dea0-4bc0-81b1-6f83eec1d4ca"). InnerVolumeSpecName "kube-api-access-zqlgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.125435 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "643d2817-dea0-4bc0-81b1-6f83eec1d4ca" (UID: "643d2817-dea0-4bc0-81b1-6f83eec1d4ca"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.141205 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-config-data" (OuterVolumeSpecName: "config-data") pod "643d2817-dea0-4bc0-81b1-6f83eec1d4ca" (UID: "643d2817-dea0-4bc0-81b1-6f83eec1d4ca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.147058 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.147127 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqlgg\" (UniqueName: \"kubernetes.io/projected/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-kube-api-access-zqlgg\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.147140 4842 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.147150 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.155491 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "643d2817-dea0-4bc0-81b1-6f83eec1d4ca" (UID: "643d2817-dea0-4bc0-81b1-6f83eec1d4ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.249141 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/643d2817-dea0-4bc0-81b1-6f83eec1d4ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.534719 4842 generic.go:334] "Generic (PLEG): container finished" podID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerID="ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d" exitCode=0 Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.534773 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerDied","Data":"ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d"} Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.534786 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.534813 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"643d2817-dea0-4bc0-81b1-6f83eec1d4ca","Type":"ContainerDied","Data":"5fada185db5847a3fb73ad8abb99ebfbc8ef0f35fc193c54109495d98a05c9a4"} Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.534835 4842 scope.go:117] "RemoveContainer" containerID="ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.571507 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.647344 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.672089 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:59:26 crc kubenswrapper[4842]: E1111 13:59:26.673075 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.673223 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:26 crc kubenswrapper[4842]: E1111 13:59:26.673362 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.673474 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.673921 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.674052 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.674330 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.675800 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.678844 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.680187 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.717290 4842 scope.go:117] "RemoveContainer" containerID="a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.774357 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.774482 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-logs\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.774523 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmvdj\" (UniqueName: \"kubernetes.io/projected/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-kube-api-access-nmvdj\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.774688 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.774756 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-config-data\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.790738 4842 scope.go:117] "RemoveContainer" containerID="ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d" Nov 11 13:59:26 crc kubenswrapper[4842]: E1111 13:59:26.791595 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d\": container with ID starting with ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d not found: ID does not exist" containerID="ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.791628 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d"} err="failed to get container status 
\"ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d\": rpc error: code = NotFound desc = could not find container \"ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d\": container with ID starting with ae2184d17176b89a1dfff585160b99be5e362c233b5deefa86292b301f13180d not found: ID does not exist" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.791659 4842 scope.go:117] "RemoveContainer" containerID="a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9" Nov 11 13:59:26 crc kubenswrapper[4842]: E1111 13:59:26.792092 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9\": container with ID starting with a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9 not found: ID does not exist" containerID="a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.792147 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9"} err="failed to get container status \"a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9\": rpc error: code = NotFound desc = could not find container \"a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9\": container with ID starting with a7c669247ecb519f8a80e68c222b6d368bcdcd7a971337722d385774a9de4da9 not found: ID does not exist" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.876499 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.876595 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-config-data\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.876643 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.876696 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-logs\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.876727 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmvdj\" (UniqueName: \"kubernetes.io/projected/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-kube-api-access-nmvdj\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.877735 4842 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-logs\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.881223 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.881785 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.882083 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-config-data\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:26 crc kubenswrapper[4842]: I1111 13:59:26.905046 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmvdj\" (UniqueName: \"kubernetes.io/projected/3bf4b336-4ea3-4fe5-ad16-5a6047338cf3-kube-api-access-nmvdj\") pod \"watcher-decision-engine-0\" (UID: \"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3\") " pod="openstack/watcher-decision-engine-0" Nov 11 13:59:27 crc kubenswrapper[4842]: I1111 13:59:27.020142 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:27 crc kubenswrapper[4842]: I1111 13:59:27.544590 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 11 13:59:28 crc kubenswrapper[4842]: I1111 13:59:28.069463 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" path="/var/lib/kubelet/pods/643d2817-dea0-4bc0-81b1-6f83eec1d4ca/volumes" Nov 11 13:59:28 crc kubenswrapper[4842]: I1111 13:59:28.556717 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3","Type":"ContainerStarted","Data":"3b44538d0102be56fc31736e98001ec74c6aea1861a0d767009a1bb311b9be43"} Nov 11 13:59:28 crc kubenswrapper[4842]: I1111 13:59:28.556763 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"3bf4b336-4ea3-4fe5-ad16-5a6047338cf3","Type":"ContainerStarted","Data":"d41eef38c696a492dcfc2e1c16cfb5dd7bec8fd3ed26a4a581a855c0cf25a837"} Nov 11 13:59:28 crc kubenswrapper[4842]: I1111 13:59:28.579988 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.579969754 podStartE2EDuration="2.579969754s" podCreationTimestamp="2025-11-11 13:59:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:28.574924134 +0000 UTC m=+1779.235213753" watchObservedRunningTime="2025-11-11 13:59:28.579969754 +0000 UTC m=+1779.240259373" Nov 11 13:59:30 crc kubenswrapper[4842]: I1111 13:59:30.165900 4842 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod0a28beec-6048-4e82-b36c-963af4b4c20d"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod0a28beec-6048-4e82-b36c-963af4b4c20d] : Timed out while waiting for systemd to remove kubepods-besteffort-pod0a28beec_6048_4e82_b36c_963af4b4c20d.slice" Nov 11 13:59:30 crc kubenswrapper[4842]: I1111 13:59:30.622465 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.608930 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.617364 4842 generic.go:334] "Generic (PLEG): container finished" podID="cb167dbe-4958-4dff-8389-4fcd23764a9c" containerID="1cdfbb355e8a2da5d86b3e9dfe3a1c82316efab30a662bd060764f099610aa02" exitCode=0 Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.617580 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cdldt" event={"ID":"cb167dbe-4958-4dff-8389-4fcd23764a9c","Type":"ContainerDied","Data":"1cdfbb355e8a2da5d86b3e9dfe3a1c82316efab30a662bd060764f099610aa02"} Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.621514 4842 generic.go:334] "Generic (PLEG): container finished" podID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerID="40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7" exitCode=137 Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.621564 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerDied","Data":"40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7"} Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.621597 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb","Type":"ContainerDied","Data":"b5ff28e8efbbfb01a041aaa5bd964f172755e89a904b85ffb015ccd035584d37"} Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.621600 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.621619 4842 scope.go:117] "RemoveContainer" containerID="40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.650592 4842 scope.go:117] "RemoveContainer" containerID="371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.672063 4842 scope.go:117] "RemoveContainer" containerID="95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.690961 4842 scope.go:117] "RemoveContainer" containerID="71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.714025 4842 scope.go:117] "RemoveContainer" containerID="40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.715557 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7\": container with ID starting with 40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7 not found: ID does not exist" containerID="40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.715793 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7"} err="failed to get container status \"40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7\": rpc error: code = NotFound desc = could not find container \"40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7\": container with ID starting with 
40c138306164b6c236f1f13515ddb8f13361c81f39e10d1df316769c674f8ab7 not found: ID does not exist" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.715878 4842 scope.go:117] "RemoveContainer" containerID="371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.716485 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89\": container with ID starting with 371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89 not found: ID does not exist" containerID="371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.716532 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89"} err="failed to get container status \"371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89\": rpc error: code = NotFound desc = could not find container \"371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89\": container with ID starting with 371acb25bb1e218c990fea0151d8419d38cd6c0e557471a3da5e759913d2fa89 not found: ID does not exist" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.716563 4842 scope.go:117] "RemoveContainer" containerID="95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.716933 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8\": container with ID starting with 95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8 not found: ID does not exist" containerID="95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.716991 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8"} err="failed to get container status \"95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8\": rpc error: code = NotFound desc = could not find container \"95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8\": container with ID starting with 95e596077d6a179e013bf72692100b5f28088684861e11bd71b2dc8c9b1722c8 not found: ID does not exist" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.717019 4842 scope.go:117] "RemoveContainer" containerID="71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.717313 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5\": container with ID starting with 71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5 not found: ID does not exist" containerID="71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.717343 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5"} err="failed to get container status \"71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5\": rpc 
error: code = NotFound desc = could not find container \"71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5\": container with ID starting with 71bc8b5a2b12ec1e723a43d636dd56ad4cc5f487efdebc8b697f9f8a577a52f5 not found: ID does not exist" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.727414 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-scripts\") pod \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.727525 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-run-httpd\") pod \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.727592 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6z24v\" (UniqueName: \"kubernetes.io/projected/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-kube-api-access-6z24v\") pod \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.727624 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-config-data\") pod \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.728133 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" (UID: "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.728123 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" (UID: "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.727729 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-log-httpd\") pod \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.728429 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-combined-ca-bundle\") pod \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.728493 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-sg-core-conf-yaml\") pod \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\" (UID: \"38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb\") " Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.729155 4842 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.729172 4842 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.740415 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-scripts" (OuterVolumeSpecName: "scripts") pod "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" (UID: "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.763372 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-kube-api-access-6z24v" (OuterVolumeSpecName: "kube-api-access-6z24v") pod "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" (UID: "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb"). InnerVolumeSpecName "kube-api-access-6z24v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.767431 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" (UID: "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.820751 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" (UID: "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.830053 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.830092 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6z24v\" (UniqueName: \"kubernetes.io/projected/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-kube-api-access-6z24v\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.830129 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.830141 4842 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.850742 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-config-data" (OuterVolumeSpecName: "config-data") pod "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" (UID: "38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.931382 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.954691 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.965453 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982081 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.982470 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-central-agent" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982488 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-central-agent" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.982501 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-notification-agent" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982507 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-notification-agent" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.982522 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="sg-core" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982528 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="sg-core" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.982547 4842 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="proxy-httpd" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982552 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="proxy-httpd" Nov 11 13:59:34 crc kubenswrapper[4842]: E1111 13:59:34.982569 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982575 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="643d2817-dea0-4bc0-81b1-6f83eec1d4ca" containerName="watcher-decision-engine" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982745 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="sg-core" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982760 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="proxy-httpd" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982772 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-central-agent" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.982789 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" containerName="ceilometer-notification-agent" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.984362 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.989823 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.990334 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 11 13:59:34 crc kubenswrapper[4842]: I1111 13:59:34.990766 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.134527 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-log-httpd\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.135014 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5slpg\" (UniqueName: \"kubernetes.io/projected/8d9e1445-577e-4b35-9613-f513b45b3500-kube-api-access-5slpg\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.135085 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-run-httpd\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.135126 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-config-data\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.135152 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.135200 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-scripts\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.135242 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.236851 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-log-httpd\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.237007 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5slpg\" (UniqueName: \"kubernetes.io/projected/8d9e1445-577e-4b35-9613-f513b45b3500-kube-api-access-5slpg\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.237061 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-run-httpd\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.237077 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-config-data\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.237094 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.237179 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-scripts\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.237197 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.238126 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-run-httpd\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.240542 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-log-httpd\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.241655 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-scripts\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.242262 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-config-data\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.242517 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.250135 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.252954 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5slpg\" (UniqueName: \"kubernetes.io/projected/8d9e1445-577e-4b35-9613-f513b45b3500-kube-api-access-5slpg\") pod \"ceilometer-0\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.307769 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.748976 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 13:59:35 crc kubenswrapper[4842]: I1111 13:59:35.954806 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.059614 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:59:36 crc kubenswrapper[4842]: E1111 13:59:36.059930 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.071924 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb" path="/var/lib/kubelet/pods/38ec4098-f6bc-457f-a9b8-cbc0b3cb49cb/volumes" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.153004 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f27d8\" (UniqueName: \"kubernetes.io/projected/cb167dbe-4958-4dff-8389-4fcd23764a9c-kube-api-access-f27d8\") pod \"cb167dbe-4958-4dff-8389-4fcd23764a9c\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.153391 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-scripts\") pod \"cb167dbe-4958-4dff-8389-4fcd23764a9c\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.153510 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-config-data\") pod \"cb167dbe-4958-4dff-8389-4fcd23764a9c\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.153743 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-combined-ca-bundle\") pod \"cb167dbe-4958-4dff-8389-4fcd23764a9c\" (UID: \"cb167dbe-4958-4dff-8389-4fcd23764a9c\") " Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.158452 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-scripts" (OuterVolumeSpecName: "scripts") pod "cb167dbe-4958-4dff-8389-4fcd23764a9c" (UID: "cb167dbe-4958-4dff-8389-4fcd23764a9c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.158832 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb167dbe-4958-4dff-8389-4fcd23764a9c-kube-api-access-f27d8" (OuterVolumeSpecName: "kube-api-access-f27d8") pod "cb167dbe-4958-4dff-8389-4fcd23764a9c" (UID: "cb167dbe-4958-4dff-8389-4fcd23764a9c"). InnerVolumeSpecName "kube-api-access-f27d8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.187641 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-config-data" (OuterVolumeSpecName: "config-data") pod "cb167dbe-4958-4dff-8389-4fcd23764a9c" (UID: "cb167dbe-4958-4dff-8389-4fcd23764a9c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.188254 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb167dbe-4958-4dff-8389-4fcd23764a9c" (UID: "cb167dbe-4958-4dff-8389-4fcd23764a9c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.257002 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f27d8\" (UniqueName: \"kubernetes.io/projected/cb167dbe-4958-4dff-8389-4fcd23764a9c-kube-api-access-f27d8\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.257059 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.257096 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.257124 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb167dbe-4958-4dff-8389-4fcd23764a9c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.643969 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerStarted","Data":"0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990"} Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.644017 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerStarted","Data":"ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6"} Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.644031 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerStarted","Data":"a191cbd7fdf7a76f5ca6ffc4da37583756d823e7387b9f71b8ff0a2cced8b9c3"} Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.645871 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cdldt" event={"ID":"cb167dbe-4958-4dff-8389-4fcd23764a9c","Type":"ContainerDied","Data":"b43949bbb446658797392a2c5e87bc31b1e68d6cd7dc75814f99c5239a4856bb"} Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.646006 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b43949bbb446658797392a2c5e87bc31b1e68d6cd7dc75814f99c5239a4856bb" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.645954 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cdldt" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.771186 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 11 13:59:36 crc kubenswrapper[4842]: E1111 13:59:36.771603 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb167dbe-4958-4dff-8389-4fcd23764a9c" containerName="nova-cell0-conductor-db-sync" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.771629 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb167dbe-4958-4dff-8389-4fcd23764a9c" containerName="nova-cell0-conductor-db-sync" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.771841 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb167dbe-4958-4dff-8389-4fcd23764a9c" containerName="nova-cell0-conductor-db-sync" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.773761 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.777119 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.777443 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-b2mm6" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.789301 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.969920 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c743e88d-2ca8-45a8-9e26-7106975b5be3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.971387 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7j87\" (UniqueName: \"kubernetes.io/projected/c743e88d-2ca8-45a8-9e26-7106975b5be3-kube-api-access-p7j87\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:36 crc kubenswrapper[4842]: I1111 13:59:36.972200 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c743e88d-2ca8-45a8-9e26-7106975b5be3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.020412 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.049779 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.074536 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c743e88d-2ca8-45a8-9e26-7106975b5be3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 
13:59:37.074590 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7j87\" (UniqueName: \"kubernetes.io/projected/c743e88d-2ca8-45a8-9e26-7106975b5be3-kube-api-access-p7j87\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.074645 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c743e88d-2ca8-45a8-9e26-7106975b5be3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.081049 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c743e88d-2ca8-45a8-9e26-7106975b5be3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.091235 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7j87\" (UniqueName: \"kubernetes.io/projected/c743e88d-2ca8-45a8-9e26-7106975b5be3-kube-api-access-p7j87\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.091398 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c743e88d-2ca8-45a8-9e26-7106975b5be3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c743e88d-2ca8-45a8-9e26-7106975b5be3\") " pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.203377 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.657649 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerStarted","Data":"f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b"} Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.658248 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.680821 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 11 13:59:37 crc kubenswrapper[4842]: I1111 13:59:37.700390 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 11 13:59:37 crc kubenswrapper[4842]: W1111 13:59:37.720628 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc743e88d_2ca8_45a8_9e26_7106975b5be3.slice/crio-6df06ed793229938fa10976f76e1af1c214462fa6dc7c7fa13edf08e055a0c73 WatchSource:0}: Error finding container 6df06ed793229938fa10976f76e1af1c214462fa6dc7c7fa13edf08e055a0c73: Status 404 returned error can't find the container with id 6df06ed793229938fa10976f76e1af1c214462fa6dc7c7fa13edf08e055a0c73 Nov 11 13:59:38 crc kubenswrapper[4842]: I1111 13:59:38.671430 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerStarted","Data":"70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc"} Nov 11 13:59:38 crc kubenswrapper[4842]: I1111 13:59:38.671935 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 11 13:59:38 crc kubenswrapper[4842]: I1111 13:59:38.673577 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c743e88d-2ca8-45a8-9e26-7106975b5be3","Type":"ContainerStarted","Data":"df034ec142c1e12c79d0c5fcc9aa3c1a4170a41ef5f7c37aa0ace363d205d1b1"} Nov 11 13:59:38 crc kubenswrapper[4842]: I1111 13:59:38.673611 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c743e88d-2ca8-45a8-9e26-7106975b5be3","Type":"ContainerStarted","Data":"6df06ed793229938fa10976f76e1af1c214462fa6dc7c7fa13edf08e055a0c73"} Nov 11 13:59:38 crc kubenswrapper[4842]: I1111 13:59:38.673904 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:38 crc kubenswrapper[4842]: I1111 13:59:38.694255 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.449422866 podStartE2EDuration="4.694233313s" podCreationTimestamp="2025-11-11 13:59:34 +0000 UTC" firstStartedPulling="2025-11-11 13:59:35.766606625 +0000 UTC m=+1786.426896254" lastFinishedPulling="2025-11-11 13:59:38.011417082 +0000 UTC m=+1788.671706701" observedRunningTime="2025-11-11 13:59:38.692835669 +0000 UTC m=+1789.353125298" watchObservedRunningTime="2025-11-11 13:59:38.694233313 +0000 UTC m=+1789.354522942" Nov 11 13:59:38 crc kubenswrapper[4842]: I1111 13:59:38.712381 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.712363849 podStartE2EDuration="2.712363849s" podCreationTimestamp="2025-11-11 
13:59:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:38.705760489 +0000 UTC m=+1789.366050108" watchObservedRunningTime="2025-11-11 13:59:38.712363849 +0000 UTC m=+1789.372653478" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.231307 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.680679 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-tbtw2"] Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.682336 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.684359 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.684406 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.690973 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tbtw2"] Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.787265 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.787321 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-config-data\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.787379 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-scripts\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.787446 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfvh9\" (UniqueName: \"kubernetes.io/projected/a471b182-7b05-4a81-93eb-257a2ce28a68-kube-api-access-pfvh9\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.868957 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.870710 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.873547 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.883667 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888674 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bb18f21-174c-4d4f-b984-aca9ae17ea76-logs\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888736 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-scripts\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888807 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b79dw\" (UniqueName: \"kubernetes.io/projected/2bb18f21-174c-4d4f-b984-aca9ae17ea76-kube-api-access-b79dw\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888832 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfvh9\" (UniqueName: \"kubernetes.io/projected/a471b182-7b05-4a81-93eb-257a2ce28a68-kube-api-access-pfvh9\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888888 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888915 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888939 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-config-data\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.888955 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-config-data\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.901670 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-scripts\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.912755 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-config-data\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.931766 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.956227 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.956891 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfvh9\" (UniqueName: \"kubernetes.io/projected/a471b182-7b05-4a81-93eb-257a2ce28a68-kube-api-access-pfvh9\") pod \"nova-cell0-cell-mapping-tbtw2\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.957887 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.962574 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.991283 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.991338 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzwxp\" (UniqueName: \"kubernetes.io/projected/c794bc2f-a5a8-4ae5-836e-18367511637e-kube-api-access-xzwxp\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.991365 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bb18f21-174c-4d4f-b984-aca9ae17ea76-logs\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.991449 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b79dw\" (UniqueName: \"kubernetes.io/projected/2bb18f21-174c-4d4f-b984-aca9ae17ea76-kube-api-access-b79dw\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.991485 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-config-data\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.991537 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.991559 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-config-data\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.993193 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bb18f21-174c-4d4f-b984-aca9ae17ea76-logs\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:42 crc kubenswrapper[4842]: I1111 13:59:42.993656 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.004646 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.006865 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-config-data\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.016849 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.019190 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.027669 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.033296 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.033941 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.047321 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b79dw\" (UniqueName: \"kubernetes.io/projected/2bb18f21-174c-4d4f-b984-aca9ae17ea76-kube-api-access-b79dw\") pod \"nova-api-0\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " pod="openstack/nova-api-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.098540 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-config-data\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.102285 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.102338 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzwxp\" (UniqueName: \"kubernetes.io/projected/c794bc2f-a5a8-4ae5-836e-18367511637e-kube-api-access-xzwxp\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.116482 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-config-data\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.119955 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.171563 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzwxp\" (UniqueName: \"kubernetes.io/projected/c794bc2f-a5a8-4ae5-836e-18367511637e-kube-api-access-xzwxp\") pod \"nova-scheduler-0\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " pod="openstack/nova-scheduler-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.191754 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.215903 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-config-data\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.215963 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.216236 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztr2n\" (UniqueName: \"kubernetes.io/projected/e91d7863-1ecc-487b-a9fa-25c800e3d54a-kube-api-access-ztr2n\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.216310 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e91d7863-1ecc-487b-a9fa-25c800e3d54a-logs\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.221220 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-754c5fdd47-xdgzz"] Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.226487 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.227609 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.253054 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-754c5fdd47-xdgzz"] Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.290362 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.292545 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.294748 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317439 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl5nq\" (UniqueName: \"kubernetes.io/projected/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-kube-api-access-rl5nq\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317525 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztr2n\" (UniqueName: \"kubernetes.io/projected/e91d7863-1ecc-487b-a9fa-25c800e3d54a-kube-api-access-ztr2n\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317550 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e91d7863-1ecc-487b-a9fa-25c800e3d54a-logs\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317594 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-nb\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317657 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-swift-storage-0\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317693 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-config\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317709 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-config-data\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317730 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317759 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-sb\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.317804 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-svc\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.318757 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e91d7863-1ecc-487b-a9fa-25c800e3d54a-logs\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.325790 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.334891 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.338183 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztr2n\" (UniqueName: \"kubernetes.io/projected/e91d7863-1ecc-487b-a9fa-25c800e3d54a-kube-api-access-ztr2n\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.344746 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-config-data\") pod \"nova-metadata-0\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420556 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npsls\" (UniqueName: \"kubernetes.io/projected/2d5e92bc-1b70-455d-9397-985e2b92f0e0-kube-api-access-npsls\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420604 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-nb\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420655 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-swift-storage-0\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420684 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-config\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420741 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420765 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-sb\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420800 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-svc\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420823 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl5nq\" (UniqueName: \"kubernetes.io/projected/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-kube-api-access-rl5nq\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.420861 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.422044 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-nb\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.422577 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-swift-storage-0\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.423064 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-config\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.424535 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-sb\") pod 
\"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.425313 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-svc\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.455557 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl5nq\" (UniqueName: \"kubernetes.io/projected/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-kube-api-access-rl5nq\") pod \"dnsmasq-dns-754c5fdd47-xdgzz\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.524118 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.524211 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.524257 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npsls\" (UniqueName: \"kubernetes.io/projected/2d5e92bc-1b70-455d-9397-985e2b92f0e0-kube-api-access-npsls\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.529027 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.541016 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.561829 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npsls\" (UniqueName: \"kubernetes.io/projected/2d5e92bc-1b70-455d-9397-985e2b92f0e0-kube-api-access-npsls\") pod \"nova-cell1-novncproxy-0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.564466 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.601170 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.627612 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.877262 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 13:59:43 crc kubenswrapper[4842]: W1111 13:59:43.877941 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bb18f21_174c_4d4f_b984_aca9ae17ea76.slice/crio-c8a83ee5f74c4a4dcfe07857947c5f01235e6a8098c98417bf7b5ef4b73046a9 WatchSource:0}: Error finding container c8a83ee5f74c4a4dcfe07857947c5f01235e6a8098c98417bf7b5ef4b73046a9: Status 404 returned error can't find the container with id c8a83ee5f74c4a4dcfe07857947c5f01235e6a8098c98417bf7b5ef4b73046a9 Nov 11 13:59:43 crc kubenswrapper[4842]: I1111 13:59:43.925357 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tbtw2"] Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.203985 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.334636 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-fxmwr"] Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.336709 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.338412 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.338673 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.345541 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-fxmwr"] Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.355933 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-754c5fdd47-xdgzz"] Nov 11 13:59:44 crc kubenswrapper[4842]: W1111 13:59:44.436620 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode91d7863_1ecc_487b_a9fa_25c800e3d54a.slice/crio-54687dab5d02e48fc0b562769261a4b8ac78a3184b0810a16cef835d19a317fc WatchSource:0}: Error finding container 54687dab5d02e48fc0b562769261a4b8ac78a3184b0810a16cef835d19a317fc: Status 404 returned error can't find the container with id 54687dab5d02e48fc0b562769261a4b8ac78a3184b0810a16cef835d19a317fc Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.440598 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.448163 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-config-data\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.448287 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.448321 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-scripts\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.448378 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4glqh\" (UniqueName: \"kubernetes.io/projected/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-kube-api-access-4glqh\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.543057 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.551136 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.551190 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-scripts\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.551243 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4glqh\" (UniqueName: \"kubernetes.io/projected/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-kube-api-access-4glqh\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.551452 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-config-data\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: W1111 13:59:44.574199 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d5e92bc_1b70_455d_9397_985e2b92f0e0.slice/crio-09b59231212636ac41f1f3ab7d3ce163fb9de4fe1a8930d4c903f9aca8680dca WatchSource:0}: Error finding container 09b59231212636ac41f1f3ab7d3ce163fb9de4fe1a8930d4c903f9aca8680dca: Status 404 returned error can't find the container with id 09b59231212636ac41f1f3ab7d3ce163fb9de4fe1a8930d4c903f9aca8680dca Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.574625 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.575359 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4glqh\" (UniqueName: \"kubernetes.io/projected/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-kube-api-access-4glqh\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.575512 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-config-data\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.576417 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-scripts\") pod \"nova-cell1-conductor-db-sync-fxmwr\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.730481 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.765662 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tbtw2" event={"ID":"a471b182-7b05-4a81-93eb-257a2ce28a68","Type":"ContainerStarted","Data":"41ba1f5abbba2b1bf69614a768a73e9b4f9571536f9042c60b3bf1beb4f0a195"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.765717 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tbtw2" event={"ID":"a471b182-7b05-4a81-93eb-257a2ce28a68","Type":"ContainerStarted","Data":"040e059a13b4b243454d31c85b75f65c43efe36a8f08ba77f595e8c659b69033"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.770618 4842 generic.go:334] "Generic (PLEG): container finished" podID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerID="00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473" exitCode=0 Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.770691 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" event={"ID":"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9","Type":"ContainerDied","Data":"00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.770756 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" event={"ID":"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9","Type":"ContainerStarted","Data":"67c5e17e4f1ba4633efa3e4de984e7104fce2e38b917004e34feb6d6119886a0"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.772697 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e91d7863-1ecc-487b-a9fa-25c800e3d54a","Type":"ContainerStarted","Data":"54687dab5d02e48fc0b562769261a4b8ac78a3184b0810a16cef835d19a317fc"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.774458 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2d5e92bc-1b70-455d-9397-985e2b92f0e0","Type":"ContainerStarted","Data":"09b59231212636ac41f1f3ab7d3ce163fb9de4fe1a8930d4c903f9aca8680dca"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.778619 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2bb18f21-174c-4d4f-b984-aca9ae17ea76","Type":"ContainerStarted","Data":"c8a83ee5f74c4a4dcfe07857947c5f01235e6a8098c98417bf7b5ef4b73046a9"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.783968 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c794bc2f-a5a8-4ae5-836e-18367511637e","Type":"ContainerStarted","Data":"6acc90cda0b89b4b36930bbf3fb24b4feb924621dc15bb4c80745bd7897725d0"} Nov 11 13:59:44 crc kubenswrapper[4842]: I1111 13:59:44.793795 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-tbtw2" podStartSLOduration=2.7937749370000002 podStartE2EDuration="2.793774937s" podCreationTimestamp="2025-11-11 13:59:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:44.784145142 +0000 UTC m=+1795.444434761" watchObservedRunningTime="2025-11-11 13:59:44.793774937 +0000 UTC m=+1795.454064556" Nov 11 13:59:45 crc kubenswrapper[4842]: I1111 13:59:45.303447 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-fxmwr"] Nov 11 13:59:45 crc kubenswrapper[4842]: I1111 13:59:45.802775 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" event={"ID":"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9","Type":"ContainerStarted","Data":"41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec"} Nov 11 13:59:45 crc kubenswrapper[4842]: I1111 13:59:45.803980 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:45 crc kubenswrapper[4842]: I1111 13:59:45.835591 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" podStartSLOduration=2.835571916 podStartE2EDuration="2.835571916s" podCreationTimestamp="2025-11-11 13:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:45.829957438 +0000 UTC m=+1796.490247067" watchObservedRunningTime="2025-11-11 13:59:45.835571916 +0000 UTC m=+1796.495861535" Nov 11 13:59:46 crc kubenswrapper[4842]: I1111 13:59:46.620151 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:46 crc kubenswrapper[4842]: I1111 13:59:46.698720 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 13:59:46 crc kubenswrapper[4842]: W1111 13:59:46.804072 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode53dd0e9_39e2_4158_97dc_6a28d3b14b5f.slice/crio-44e67226622c313fba006a904c79f609f9806e6d17de9546f602efb485852819 WatchSource:0}: Error finding container 44e67226622c313fba006a904c79f609f9806e6d17de9546f602efb485852819: Status 404 returned error can't find the container with id 44e67226622c313fba006a904c79f609f9806e6d17de9546f602efb485852819 Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.822880 4842 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" event={"ID":"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f","Type":"ContainerStarted","Data":"e00dda4eec59475414c3616691804a6d3344a64857746dbd3aebe4b9e86395c0"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.822928 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" event={"ID":"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f","Type":"ContainerStarted","Data":"44e67226622c313fba006a904c79f609f9806e6d17de9546f602efb485852819"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.826318 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e91d7863-1ecc-487b-a9fa-25c800e3d54a","Type":"ContainerStarted","Data":"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.826373 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e91d7863-1ecc-487b-a9fa-25c800e3d54a","Type":"ContainerStarted","Data":"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.826389 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-log" containerID="cri-o://7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345" gracePeriod=30 Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.826448 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-metadata" containerID="cri-o://8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0" gracePeriod=30 Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.829248 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2d5e92bc-1b70-455d-9397-985e2b92f0e0","Type":"ContainerStarted","Data":"837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.829350 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="2d5e92bc-1b70-455d-9397-985e2b92f0e0" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec" gracePeriod=30 Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.832451 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2bb18f21-174c-4d4f-b984-aca9ae17ea76","Type":"ContainerStarted","Data":"506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.832480 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2bb18f21-174c-4d4f-b984-aca9ae17ea76","Type":"ContainerStarted","Data":"c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.834488 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c794bc2f-a5a8-4ae5-836e-18367511637e","Type":"ContainerStarted","Data":"a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c"} Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.846564 4842 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" podStartSLOduration=3.846541439 podStartE2EDuration="3.846541439s" podCreationTimestamp="2025-11-11 13:59:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:47.839813845 +0000 UTC m=+1798.500103484" watchObservedRunningTime="2025-11-11 13:59:47.846541439 +0000 UTC m=+1798.506831058" Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.863392 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.837381181 podStartE2EDuration="5.863374804s" podCreationTimestamp="2025-11-11 13:59:42 +0000 UTC" firstStartedPulling="2025-11-11 13:59:43.882334586 +0000 UTC m=+1794.542624205" lastFinishedPulling="2025-11-11 13:59:46.908328209 +0000 UTC m=+1797.568617828" observedRunningTime="2025-11-11 13:59:47.861478644 +0000 UTC m=+1798.521768273" watchObservedRunningTime="2025-11-11 13:59:47.863374804 +0000 UTC m=+1798.523664433" Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.877251 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.158353702 podStartE2EDuration="5.877232194s" podCreationTimestamp="2025-11-11 13:59:42 +0000 UTC" firstStartedPulling="2025-11-11 13:59:44.211308201 +0000 UTC m=+1794.871597820" lastFinishedPulling="2025-11-11 13:59:46.930186683 +0000 UTC m=+1797.590476312" observedRunningTime="2025-11-11 13:59:47.876533471 +0000 UTC m=+1798.536823100" watchObservedRunningTime="2025-11-11 13:59:47.877232194 +0000 UTC m=+1798.537521813" Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.892650 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.550129473 podStartE2EDuration="4.892628973s" podCreationTimestamp="2025-11-11 13:59:43 +0000 UTC" firstStartedPulling="2025-11-11 13:59:44.583904163 +0000 UTC m=+1795.244193782" lastFinishedPulling="2025-11-11 13:59:46.926403663 +0000 UTC m=+1797.586693282" observedRunningTime="2025-11-11 13:59:47.889649278 +0000 UTC m=+1798.549938897" watchObservedRunningTime="2025-11-11 13:59:47.892628973 +0000 UTC m=+1798.552918592" Nov 11 13:59:47 crc kubenswrapper[4842]: I1111 13:59:47.914728 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.423333787 podStartE2EDuration="5.914701284s" podCreationTimestamp="2025-11-11 13:59:42 +0000 UTC" firstStartedPulling="2025-11-11 13:59:44.439783087 +0000 UTC m=+1795.100072696" lastFinishedPulling="2025-11-11 13:59:46.931150564 +0000 UTC m=+1797.591440193" observedRunningTime="2025-11-11 13:59:47.907111712 +0000 UTC m=+1798.567401341" watchObservedRunningTime="2025-11-11 13:59:47.914701284 +0000 UTC m=+1798.574990903" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.229089 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.499864 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.628571 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.645516 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e91d7863-1ecc-487b-a9fa-25c800e3d54a-logs\") pod \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.645805 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-combined-ca-bundle\") pod \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.646014 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztr2n\" (UniqueName: \"kubernetes.io/projected/e91d7863-1ecc-487b-a9fa-25c800e3d54a-kube-api-access-ztr2n\") pod \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.646131 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-config-data\") pod \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\" (UID: \"e91d7863-1ecc-487b-a9fa-25c800e3d54a\") " Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.647804 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e91d7863-1ecc-487b-a9fa-25c800e3d54a-logs" (OuterVolumeSpecName: "logs") pod "e91d7863-1ecc-487b-a9fa-25c800e3d54a" (UID: "e91d7863-1ecc-487b-a9fa-25c800e3d54a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.653408 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e91d7863-1ecc-487b-a9fa-25c800e3d54a-kube-api-access-ztr2n" (OuterVolumeSpecName: "kube-api-access-ztr2n") pod "e91d7863-1ecc-487b-a9fa-25c800e3d54a" (UID: "e91d7863-1ecc-487b-a9fa-25c800e3d54a"). InnerVolumeSpecName "kube-api-access-ztr2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.675890 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-config-data" (OuterVolumeSpecName: "config-data") pod "e91d7863-1ecc-487b-a9fa-25c800e3d54a" (UID: "e91d7863-1ecc-487b-a9fa-25c800e3d54a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.682643 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e91d7863-1ecc-487b-a9fa-25c800e3d54a" (UID: "e91d7863-1ecc-487b-a9fa-25c800e3d54a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.748554 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e91d7863-1ecc-487b-a9fa-25c800e3d54a-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.749183 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.749290 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztr2n\" (UniqueName: \"kubernetes.io/projected/e91d7863-1ecc-487b-a9fa-25c800e3d54a-kube-api-access-ztr2n\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.749374 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e91d7863-1ecc-487b-a9fa-25c800e3d54a-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.854149 4842 generic.go:334] "Generic (PLEG): container finished" podID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerID="8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0" exitCode=0 Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.855308 4842 generic.go:334] "Generic (PLEG): container finished" podID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerID="7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345" exitCode=143 Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.855253 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e91d7863-1ecc-487b-a9fa-25c800e3d54a","Type":"ContainerDied","Data":"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0"} Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.855273 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.856310 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e91d7863-1ecc-487b-a9fa-25c800e3d54a","Type":"ContainerDied","Data":"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345"} Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.856479 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e91d7863-1ecc-487b-a9fa-25c800e3d54a","Type":"ContainerDied","Data":"54687dab5d02e48fc0b562769261a4b8ac78a3184b0810a16cef835d19a317fc"} Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.856400 4842 scope.go:117] "RemoveContainer" containerID="8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.890879 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.901308 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.902394 4842 scope.go:117] "RemoveContainer" containerID="7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.919932 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:48 crc kubenswrapper[4842]: E1111 13:59:48.920883 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-metadata" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.921243 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-metadata" Nov 11 13:59:48 crc kubenswrapper[4842]: E1111 13:59:48.921354 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-log" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.921427 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-log" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.921826 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-metadata" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.922257 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" containerName="nova-metadata-log" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.923779 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.938384 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.944574 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.948502 4842 scope.go:117] "RemoveContainer" containerID="8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0" Nov 11 13:59:48 crc kubenswrapper[4842]: E1111 13:59:48.949617 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0\": container with ID starting with 8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0 not found: ID does not exist" containerID="8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.949700 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0"} err="failed to get container status \"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0\": rpc error: code = NotFound desc = could not find container \"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0\": container with ID starting with 8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0 not found: ID does not exist" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.949735 4842 scope.go:117] "RemoveContainer" containerID="7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345" Nov 11 13:59:48 crc kubenswrapper[4842]: E1111 13:59:48.950795 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345\": container with ID starting with 7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345 not found: ID does not exist" containerID="7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.950932 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345"} err="failed to get container status \"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345\": rpc error: code = NotFound desc = could not find container \"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345\": container with ID starting with 7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345 not found: ID does not exist" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.951651 4842 scope.go:117] "RemoveContainer" containerID="8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.953206 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0"} err="failed to get container status \"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0\": rpc error: code = NotFound desc = could not find container \"8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0\": container with ID starting with 
8a992eb87a1508b8ab729ff519470a1142b5093a09b0d0015065aef79f7e95c0 not found: ID does not exist" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.953246 4842 scope.go:117] "RemoveContainer" containerID="7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.954810 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345"} err="failed to get container status \"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345\": rpc error: code = NotFound desc = could not find container \"7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345\": container with ID starting with 7e4976e463325a4dcea8496d24f17ab9cbff7d4a961380d046356c05e3093345 not found: ID does not exist" Nov 11 13:59:48 crc kubenswrapper[4842]: I1111 13:59:48.979352 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.060133 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 13:59:49 crc kubenswrapper[4842]: E1111 13:59:49.060417 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.065419 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.065479 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/759e0148-3900-4e1d-b990-de5f3b5fc610-logs\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.065543 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.065565 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vdbp\" (UniqueName: \"kubernetes.io/projected/759e0148-3900-4e1d-b990-de5f3b5fc610-kube-api-access-9vdbp\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.065580 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-config-data\") pod \"nova-metadata-0\" (UID: 
\"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.167568 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.167663 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/759e0148-3900-4e1d-b990-de5f3b5fc610-logs\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.167787 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.167813 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vdbp\" (UniqueName: \"kubernetes.io/projected/759e0148-3900-4e1d-b990-de5f3b5fc610-kube-api-access-9vdbp\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.167835 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-config-data\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.168135 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/759e0148-3900-4e1d-b990-de5f3b5fc610-logs\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.172620 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.173502 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-config-data\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.181264 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.183989 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vdbp\" (UniqueName: 
\"kubernetes.io/projected/759e0148-3900-4e1d-b990-de5f3b5fc610-kube-api-access-9vdbp\") pod \"nova-metadata-0\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.271337 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.790464 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:49 crc kubenswrapper[4842]: I1111 13:59:49.900203 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"759e0148-3900-4e1d-b990-de5f3b5fc610","Type":"ContainerStarted","Data":"106172a42c96fa8730407f57147d8b5c530f976986e6aed210a0d7a9f0b66a13"} Nov 11 13:59:50 crc kubenswrapper[4842]: I1111 13:59:50.080461 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e91d7863-1ecc-487b-a9fa-25c800e3d54a" path="/var/lib/kubelet/pods/e91d7863-1ecc-487b-a9fa-25c800e3d54a/volumes" Nov 11 13:59:50 crc kubenswrapper[4842]: I1111 13:59:50.910057 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"759e0148-3900-4e1d-b990-de5f3b5fc610","Type":"ContainerStarted","Data":"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8"} Nov 11 13:59:50 crc kubenswrapper[4842]: I1111 13:59:50.910375 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"759e0148-3900-4e1d-b990-de5f3b5fc610","Type":"ContainerStarted","Data":"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030"} Nov 11 13:59:50 crc kubenswrapper[4842]: I1111 13:59:50.929149 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.929127968 podStartE2EDuration="2.929127968s" podCreationTimestamp="2025-11-11 13:59:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:50.928009512 +0000 UTC m=+1801.588299141" watchObservedRunningTime="2025-11-11 13:59:50.929127968 +0000 UTC m=+1801.589417587" Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.193222 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.193558 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.229290 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.263690 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.604241 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.672279 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586fb9b84f-djjqv"] Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.672494 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" podUID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerName="dnsmasq-dns" 
containerID="cri-o://ae523ba92374e98b41f2d246d915e2b3b7b6393469dd3fe27205f33a0543611a" gracePeriod=10 Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.939712 4842 generic.go:334] "Generic (PLEG): container finished" podID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerID="ae523ba92374e98b41f2d246d915e2b3b7b6393469dd3fe27205f33a0543611a" exitCode=0 Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.939787 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" event={"ID":"85d010dc-9a24-4773-bfa5-07d453eb0ab4","Type":"ContainerDied","Data":"ae523ba92374e98b41f2d246d915e2b3b7b6393469dd3fe27205f33a0543611a"} Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.950314 4842 generic.go:334] "Generic (PLEG): container finished" podID="a471b182-7b05-4a81-93eb-257a2ce28a68" containerID="41ba1f5abbba2b1bf69614a768a73e9b4f9571536f9042c60b3bf1beb4f0a195" exitCode=0 Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.951239 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tbtw2" event={"ID":"a471b182-7b05-4a81-93eb-257a2ce28a68","Type":"ContainerDied","Data":"41ba1f5abbba2b1bf69614a768a73e9b4f9571536f9042c60b3bf1beb4f0a195"} Nov 11 13:59:53 crc kubenswrapper[4842]: I1111 13:59:53.989230 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.270286 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.272444 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.284417 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.204:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.284828 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.204:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.362799 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.506147 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-svc\") pod \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.506511 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-swift-storage-0\") pod \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.506605 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-sb\") pod \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.506736 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-config\") pod \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.506879 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-nb\") pod \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.507075 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bb8l9\" (UniqueName: \"kubernetes.io/projected/85d010dc-9a24-4773-bfa5-07d453eb0ab4-kube-api-access-bb8l9\") pod \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\" (UID: \"85d010dc-9a24-4773-bfa5-07d453eb0ab4\") " Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.512865 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85d010dc-9a24-4773-bfa5-07d453eb0ab4-kube-api-access-bb8l9" (OuterVolumeSpecName: "kube-api-access-bb8l9") pod "85d010dc-9a24-4773-bfa5-07d453eb0ab4" (UID: "85d010dc-9a24-4773-bfa5-07d453eb0ab4"). InnerVolumeSpecName "kube-api-access-bb8l9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.563085 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "85d010dc-9a24-4773-bfa5-07d453eb0ab4" (UID: "85d010dc-9a24-4773-bfa5-07d453eb0ab4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.569448 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "85d010dc-9a24-4773-bfa5-07d453eb0ab4" (UID: "85d010dc-9a24-4773-bfa5-07d453eb0ab4"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.572733 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-config" (OuterVolumeSpecName: "config") pod "85d010dc-9a24-4773-bfa5-07d453eb0ab4" (UID: "85d010dc-9a24-4773-bfa5-07d453eb0ab4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.575714 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "85d010dc-9a24-4773-bfa5-07d453eb0ab4" (UID: "85d010dc-9a24-4773-bfa5-07d453eb0ab4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.609971 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-config\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.610128 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.610144 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bb8l9\" (UniqueName: \"kubernetes.io/projected/85d010dc-9a24-4773-bfa5-07d453eb0ab4-kube-api-access-bb8l9\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.610157 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.610287 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.612455 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "85d010dc-9a24-4773-bfa5-07d453eb0ab4" (UID: "85d010dc-9a24-4773-bfa5-07d453eb0ab4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.712417 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/85d010dc-9a24-4773-bfa5-07d453eb0ab4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.961603 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" event={"ID":"85d010dc-9a24-4773-bfa5-07d453eb0ab4","Type":"ContainerDied","Data":"e51d4143494f348e7f9cb9d2a674e092e9c5a6a5a01e690a90da3cb2c1ecf6f8"} Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.961674 4842 scope.go:117] "RemoveContainer" containerID="ae523ba92374e98b41f2d246d915e2b3b7b6393469dd3fe27205f33a0543611a" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.962716 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586fb9b84f-djjqv" Nov 11 13:59:54 crc kubenswrapper[4842]: I1111 13:59:54.995056 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586fb9b84f-djjqv"] Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.007288 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586fb9b84f-djjqv"] Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.018762 4842 scope.go:117] "RemoveContainer" containerID="fbbbac5ff73c19ce3ebd43bbf71619895fa03a11c55bce2d04e7759dab3ac95e" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.447350 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.528944 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfvh9\" (UniqueName: \"kubernetes.io/projected/a471b182-7b05-4a81-93eb-257a2ce28a68-kube-api-access-pfvh9\") pod \"a471b182-7b05-4a81-93eb-257a2ce28a68\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.529118 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-config-data\") pod \"a471b182-7b05-4a81-93eb-257a2ce28a68\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.529218 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-combined-ca-bundle\") pod \"a471b182-7b05-4a81-93eb-257a2ce28a68\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.529263 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-scripts\") pod \"a471b182-7b05-4a81-93eb-257a2ce28a68\" (UID: \"a471b182-7b05-4a81-93eb-257a2ce28a68\") " Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.534314 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a471b182-7b05-4a81-93eb-257a2ce28a68-kube-api-access-pfvh9" (OuterVolumeSpecName: "kube-api-access-pfvh9") pod "a471b182-7b05-4a81-93eb-257a2ce28a68" (UID: "a471b182-7b05-4a81-93eb-257a2ce28a68"). InnerVolumeSpecName "kube-api-access-pfvh9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.547625 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-scripts" (OuterVolumeSpecName: "scripts") pod "a471b182-7b05-4a81-93eb-257a2ce28a68" (UID: "a471b182-7b05-4a81-93eb-257a2ce28a68"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.562058 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-config-data" (OuterVolumeSpecName: "config-data") pod "a471b182-7b05-4a81-93eb-257a2ce28a68" (UID: "a471b182-7b05-4a81-93eb-257a2ce28a68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.563688 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a471b182-7b05-4a81-93eb-257a2ce28a68" (UID: "a471b182-7b05-4a81-93eb-257a2ce28a68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.633147 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.633177 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.633208 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a471b182-7b05-4a81-93eb-257a2ce28a68-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.633220 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfvh9\" (UniqueName: \"kubernetes.io/projected/a471b182-7b05-4a81-93eb-257a2ce28a68-kube-api-access-pfvh9\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.973982 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tbtw2" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.976899 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tbtw2" event={"ID":"a471b182-7b05-4a81-93eb-257a2ce28a68","Type":"ContainerDied","Data":"040e059a13b4b243454d31c85b75f65c43efe36a8f08ba77f595e8c659b69033"} Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.976944 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="040e059a13b4b243454d31c85b75f65c43efe36a8f08ba77f595e8c659b69033" Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.979351 4842 generic.go:334] "Generic (PLEG): container finished" podID="e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" containerID="e00dda4eec59475414c3616691804a6d3344a64857746dbd3aebe4b9e86395c0" exitCode=0 Nov 11 13:59:55 crc kubenswrapper[4842]: I1111 13:59:55.979412 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" event={"ID":"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f","Type":"ContainerDied","Data":"e00dda4eec59475414c3616691804a6d3344a64857746dbd3aebe4b9e86395c0"} Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.076463 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" path="/var/lib/kubelet/pods/85d010dc-9a24-4773-bfa5-07d453eb0ab4/volumes" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.087087 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.087273 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="c794bc2f-a5a8-4ae5-836e-18367511637e" containerName="nova-scheduler-scheduler" containerID="cri-o://a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" gracePeriod=30 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.121369 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.121655 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-log" containerID="cri-o://c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8" gracePeriod=30 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.122050 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-api" containerID="cri-o://506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429" gracePeriod=30 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.136119 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.136331 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-log" containerID="cri-o://7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030" gracePeriod=30 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.136758 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-metadata" 
containerID="cri-o://dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8" gracePeriod=30 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.642483 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.758612 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-nova-metadata-tls-certs\") pod \"759e0148-3900-4e1d-b990-de5f3b5fc610\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.758698 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-combined-ca-bundle\") pod \"759e0148-3900-4e1d-b990-de5f3b5fc610\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.758793 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-config-data\") pod \"759e0148-3900-4e1d-b990-de5f3b5fc610\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.758853 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/759e0148-3900-4e1d-b990-de5f3b5fc610-logs\") pod \"759e0148-3900-4e1d-b990-de5f3b5fc610\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.758952 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vdbp\" (UniqueName: \"kubernetes.io/projected/759e0148-3900-4e1d-b990-de5f3b5fc610-kube-api-access-9vdbp\") pod \"759e0148-3900-4e1d-b990-de5f3b5fc610\" (UID: \"759e0148-3900-4e1d-b990-de5f3b5fc610\") " Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.759418 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/759e0148-3900-4e1d-b990-de5f3b5fc610-logs" (OuterVolumeSpecName: "logs") pod "759e0148-3900-4e1d-b990-de5f3b5fc610" (UID: "759e0148-3900-4e1d-b990-de5f3b5fc610"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.759701 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/759e0148-3900-4e1d-b990-de5f3b5fc610-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.763266 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/759e0148-3900-4e1d-b990-de5f3b5fc610-kube-api-access-9vdbp" (OuterVolumeSpecName: "kube-api-access-9vdbp") pod "759e0148-3900-4e1d-b990-de5f3b5fc610" (UID: "759e0148-3900-4e1d-b990-de5f3b5fc610"). InnerVolumeSpecName "kube-api-access-9vdbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.788005 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "759e0148-3900-4e1d-b990-de5f3b5fc610" (UID: "759e0148-3900-4e1d-b990-de5f3b5fc610"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.804417 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-config-data" (OuterVolumeSpecName: "config-data") pod "759e0148-3900-4e1d-b990-de5f3b5fc610" (UID: "759e0148-3900-4e1d-b990-de5f3b5fc610"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.810736 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "759e0148-3900-4e1d-b990-de5f3b5fc610" (UID: "759e0148-3900-4e1d-b990-de5f3b5fc610"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.861961 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vdbp\" (UniqueName: \"kubernetes.io/projected/759e0148-3900-4e1d-b990-de5f3b5fc610-kube-api-access-9vdbp\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.862001 4842 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.862014 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.862024 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/759e0148-3900-4e1d-b990-de5f3b5fc610-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.995026 4842 generic.go:334] "Generic (PLEG): container finished" podID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerID="dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8" exitCode=0 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.995061 4842 generic.go:334] "Generic (PLEG): container finished" podID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerID="7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030" exitCode=143 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.995132 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.995135 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"759e0148-3900-4e1d-b990-de5f3b5fc610","Type":"ContainerDied","Data":"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8"} Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.995271 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"759e0148-3900-4e1d-b990-de5f3b5fc610","Type":"ContainerDied","Data":"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030"} Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.995292 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"759e0148-3900-4e1d-b990-de5f3b5fc610","Type":"ContainerDied","Data":"106172a42c96fa8730407f57147d8b5c530f976986e6aed210a0d7a9f0b66a13"} Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.995317 4842 scope.go:117] "RemoveContainer" containerID="dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8" Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.997181 4842 generic.go:334] "Generic (PLEG): container finished" podID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerID="c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8" exitCode=143 Nov 11 13:59:56 crc kubenswrapper[4842]: I1111 13:59:56.997455 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2bb18f21-174c-4d4f-b984-aca9ae17ea76","Type":"ContainerDied","Data":"c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8"} Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.026423 4842 scope.go:117] "RemoveContainer" containerID="7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.035414 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.046883 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.057402 4842 scope.go:117] "RemoveContainer" containerID="dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8" Nov 11 13:59:57 crc kubenswrapper[4842]: E1111 13:59:57.059183 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8\": container with ID starting with dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8 not found: ID does not exist" containerID="dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.059215 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8"} err="failed to get container status \"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8\": rpc error: code = NotFound desc = could not find container \"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8\": container with ID starting with dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8 not found: ID does not exist" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.059236 4842 scope.go:117] "RemoveContainer" 
containerID="7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.059291 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:57 crc kubenswrapper[4842]: E1111 13:59:57.059506 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030\": container with ID starting with 7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030 not found: ID does not exist" containerID="7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.059545 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030"} err="failed to get container status \"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030\": rpc error: code = NotFound desc = could not find container \"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030\": container with ID starting with 7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030 not found: ID does not exist" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.059567 4842 scope.go:117] "RemoveContainer" containerID="dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.059789 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8"} err="failed to get container status \"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8\": rpc error: code = NotFound desc = could not find container \"dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8\": container with ID starting with dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8 not found: ID does not exist" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.059806 4842 scope.go:117] "RemoveContainer" containerID="7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.063787 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030"} err="failed to get container status \"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030\": rpc error: code = NotFound desc = could not find container \"7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030\": container with ID starting with 7725c1c2090d2cbdcc7e9dd2ddc56b59f88ef7b28a3313c9263010e85e8a2030 not found: ID does not exist" Nov 11 13:59:57 crc kubenswrapper[4842]: E1111 13:59:57.064876 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerName="init" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.064899 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerName="init" Nov 11 13:59:57 crc kubenswrapper[4842]: E1111 13:59:57.064932 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-log" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.064939 4842 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-log" Nov 11 13:59:57 crc kubenswrapper[4842]: E1111 13:59:57.064966 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-metadata" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.064972 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-metadata" Nov 11 13:59:57 crc kubenswrapper[4842]: E1111 13:59:57.064998 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerName="dnsmasq-dns" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.065003 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerName="dnsmasq-dns" Nov 11 13:59:57 crc kubenswrapper[4842]: E1111 13:59:57.065028 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a471b182-7b05-4a81-93eb-257a2ce28a68" containerName="nova-manage" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.065036 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a471b182-7b05-4a81-93eb-257a2ce28a68" containerName="nova-manage" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.065645 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-log" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.065679 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="a471b182-7b05-4a81-93eb-257a2ce28a68" containerName="nova-manage" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.065690 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="85d010dc-9a24-4773-bfa5-07d453eb0ab4" containerName="dnsmasq-dns" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.065724 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" containerName="nova-metadata-metadata" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.083948 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.084104 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.092103 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.092392 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.174228 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/319b2767-fb86-4b6b-873e-492634aa5465-logs\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.174398 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.174577 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.174712 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2z8k\" (UniqueName: \"kubernetes.io/projected/319b2767-fb86-4b6b-873e-492634aa5465-kube-api-access-c2z8k\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.174831 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-config-data\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.277300 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.277401 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.277451 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2z8k\" (UniqueName: \"kubernetes.io/projected/319b2767-fb86-4b6b-873e-492634aa5465-kube-api-access-c2z8k\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.277486 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-config-data\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.277513 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/319b2767-fb86-4b6b-873e-492634aa5465-logs\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.277898 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/319b2767-fb86-4b6b-873e-492634aa5465-logs\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.282952 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-config-data\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.283841 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.291665 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.294894 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2z8k\" (UniqueName: \"kubernetes.io/projected/319b2767-fb86-4b6b-873e-492634aa5465-kube-api-access-c2z8k\") pod \"nova-metadata-0\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.383168 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.420549 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.480382 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4glqh\" (UniqueName: \"kubernetes.io/projected/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-kube-api-access-4glqh\") pod \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.480501 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-config-data\") pod \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.480591 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-combined-ca-bundle\") pod \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.480703 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-scripts\") pod \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\" (UID: \"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f\") " Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.485220 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-scripts" (OuterVolumeSpecName: "scripts") pod "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" (UID: "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.485569 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-kube-api-access-4glqh" (OuterVolumeSpecName: "kube-api-access-4glqh") pod "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" (UID: "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f"). InnerVolumeSpecName "kube-api-access-4glqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.514341 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-config-data" (OuterVolumeSpecName: "config-data") pod "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" (UID: "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.522115 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" (UID: "e53dd0e9-39e2-4158-97dc-6a28d3b14b5f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.583712 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.583908 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.584177 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4glqh\" (UniqueName: \"kubernetes.io/projected/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-kube-api-access-4glqh\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.584191 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:57 crc kubenswrapper[4842]: I1111 13:59:57.899854 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.009179 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"319b2767-fb86-4b6b-873e-492634aa5465","Type":"ContainerStarted","Data":"bedcc7c849010dc4880c218deda54d83e97f070fcbb6bd8c0f9deb7c08dd9fa9"} Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.012814 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" event={"ID":"e53dd0e9-39e2-4158-97dc-6a28d3b14b5f","Type":"ContainerDied","Data":"44e67226622c313fba006a904c79f609f9806e6d17de9546f602efb485852819"} Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.012867 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-fxmwr" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.012874 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44e67226622c313fba006a904c79f609f9806e6d17de9546f602efb485852819" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.070409 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="759e0148-3900-4e1d-b990-de5f3b5fc610" path="/var/lib/kubelet/pods/759e0148-3900-4e1d-b990-de5f3b5fc610/volumes" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.071064 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 11 13:59:58 crc kubenswrapper[4842]: E1111 13:59:58.071742 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" containerName="nova-cell1-conductor-db-sync" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.071761 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" containerName="nova-cell1-conductor-db-sync" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.071965 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" containerName="nova-cell1-conductor-db-sync" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.072863 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.074898 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.082113 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.198274 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.198653 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.198779 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scr9s\" (UniqueName: \"kubernetes.io/projected/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-kube-api-access-scr9s\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: E1111 13:59:58.231698 4842 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 11 13:59:58 crc kubenswrapper[4842]: E1111 13:59:58.233468 4842 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 11 13:59:58 crc kubenswrapper[4842]: E1111 13:59:58.234774 4842 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 11 13:59:58 crc kubenswrapper[4842]: E1111 13:59:58.234809 4842 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="c794bc2f-a5a8-4ae5-836e-18367511637e" containerName="nova-scheduler-scheduler" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.300362 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scr9s\" (UniqueName: \"kubernetes.io/projected/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-kube-api-access-scr9s\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 
13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.300490 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.300514 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.304534 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.304750 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.316146 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scr9s\" (UniqueName: \"kubernetes.io/projected/7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49-kube-api-access-scr9s\") pod \"nova-cell1-conductor-0\" (UID: \"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49\") " pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.407885 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 11 13:59:58 crc kubenswrapper[4842]: I1111 13:59:58.870610 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.026078 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"319b2767-fb86-4b6b-873e-492634aa5465","Type":"ContainerStarted","Data":"5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803"} Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.026146 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"319b2767-fb86-4b6b-873e-492634aa5465","Type":"ContainerStarted","Data":"d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524"} Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.027146 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49","Type":"ContainerStarted","Data":"200ff5f24ddf4ed166093062945819f850bbffa9faf3ba94a130715244bd3cce"} Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.047069 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.047049938 podStartE2EDuration="2.047049938s" podCreationTimestamp="2025-11-11 13:59:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 13:59:59.044017702 +0000 UTC m=+1809.704307331" watchObservedRunningTime="2025-11-11 13:59:59.047049938 +0000 UTC m=+1809.707339557" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.414284 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.523547 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-config-data\") pod \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.523609 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b79dw\" (UniqueName: \"kubernetes.io/projected/2bb18f21-174c-4d4f-b984-aca9ae17ea76-kube-api-access-b79dw\") pod \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.523676 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bb18f21-174c-4d4f-b984-aca9ae17ea76-logs\") pod \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.523747 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-combined-ca-bundle\") pod \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\" (UID: \"2bb18f21-174c-4d4f-b984-aca9ae17ea76\") " Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.524853 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bb18f21-174c-4d4f-b984-aca9ae17ea76-logs" (OuterVolumeSpecName: "logs") pod "2bb18f21-174c-4d4f-b984-aca9ae17ea76" (UID: "2bb18f21-174c-4d4f-b984-aca9ae17ea76"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.528461 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bb18f21-174c-4d4f-b984-aca9ae17ea76-kube-api-access-b79dw" (OuterVolumeSpecName: "kube-api-access-b79dw") pod "2bb18f21-174c-4d4f-b984-aca9ae17ea76" (UID: "2bb18f21-174c-4d4f-b984-aca9ae17ea76"). InnerVolumeSpecName "kube-api-access-b79dw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.554312 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-config-data" (OuterVolumeSpecName: "config-data") pod "2bb18f21-174c-4d4f-b984-aca9ae17ea76" (UID: "2bb18f21-174c-4d4f-b984-aca9ae17ea76"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.564321 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2bb18f21-174c-4d4f-b984-aca9ae17ea76" (UID: "2bb18f21-174c-4d4f-b984-aca9ae17ea76"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.626892 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.627266 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b79dw\" (UniqueName: \"kubernetes.io/projected/2bb18f21-174c-4d4f-b984-aca9ae17ea76-kube-api-access-b79dw\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.627279 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2bb18f21-174c-4d4f-b984-aca9ae17ea76-logs\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.627289 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bb18f21-174c-4d4f-b984-aca9ae17ea76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.709573 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.831064 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-config-data\") pod \"c794bc2f-a5a8-4ae5-836e-18367511637e\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.831392 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-combined-ca-bundle\") pod \"c794bc2f-a5a8-4ae5-836e-18367511637e\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.831456 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzwxp\" (UniqueName: \"kubernetes.io/projected/c794bc2f-a5a8-4ae5-836e-18367511637e-kube-api-access-xzwxp\") pod \"c794bc2f-a5a8-4ae5-836e-18367511637e\" (UID: \"c794bc2f-a5a8-4ae5-836e-18367511637e\") " Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.835911 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c794bc2f-a5a8-4ae5-836e-18367511637e-kube-api-access-xzwxp" (OuterVolumeSpecName: "kube-api-access-xzwxp") pod "c794bc2f-a5a8-4ae5-836e-18367511637e" (UID: "c794bc2f-a5a8-4ae5-836e-18367511637e"). InnerVolumeSpecName "kube-api-access-xzwxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.864286 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-config-data" (OuterVolumeSpecName: "config-data") pod "c794bc2f-a5a8-4ae5-836e-18367511637e" (UID: "c794bc2f-a5a8-4ae5-836e-18367511637e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.871247 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c794bc2f-a5a8-4ae5-836e-18367511637e" (UID: "c794bc2f-a5a8-4ae5-836e-18367511637e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.933731 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.933772 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c794bc2f-a5a8-4ae5-836e-18367511637e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 13:59:59 crc kubenswrapper[4842]: I1111 13:59:59.933789 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzwxp\" (UniqueName: \"kubernetes.io/projected/c794bc2f-a5a8-4ae5-836e-18367511637e-kube-api-access-xzwxp\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.037836 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49","Type":"ContainerStarted","Data":"1da84419a33d3b27359b29d641002fd966365e81d741f0105e10015d8b529b7e"} Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.037917 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.040139 4842 generic.go:334] "Generic (PLEG): container finished" podID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerID="506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429" exitCode=0 Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.040186 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.040189 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2bb18f21-174c-4d4f-b984-aca9ae17ea76","Type":"ContainerDied","Data":"506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429"} Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.040236 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2bb18f21-174c-4d4f-b984-aca9ae17ea76","Type":"ContainerDied","Data":"c8a83ee5f74c4a4dcfe07857947c5f01235e6a8098c98417bf7b5ef4b73046a9"} Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.040261 4842 scope.go:117] "RemoveContainer" containerID="506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.042399 4842 generic.go:334] "Generic (PLEG): container finished" podID="c794bc2f-a5a8-4ae5-836e-18367511637e" containerID="a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" exitCode=0 Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.042463 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c794bc2f-a5a8-4ae5-836e-18367511637e","Type":"ContainerDied","Data":"a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c"} Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.042500 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c794bc2f-a5a8-4ae5-836e-18367511637e","Type":"ContainerDied","Data":"6acc90cda0b89b4b36930bbf3fb24b4feb924621dc15bb4c80745bd7897725d0"} Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.042549 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.062975 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.062948475 podStartE2EDuration="2.062948475s" podCreationTimestamp="2025-11-11 13:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:00.056095278 +0000 UTC m=+1810.716384897" watchObservedRunningTime="2025-11-11 14:00:00.062948475 +0000 UTC m=+1810.723238104" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.072866 4842 scope.go:117] "RemoveContainer" containerID="c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.109344 4842 scope.go:117] "RemoveContainer" containerID="506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429" Nov 11 14:00:00 crc kubenswrapper[4842]: E1111 14:00:00.121910 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429\": container with ID starting with 506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429 not found: ID does not exist" containerID="506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.122010 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429"} err="failed to get container status \"506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429\": rpc error: code = NotFound desc = could not find container \"506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429\": container with ID starting with 506d2502fdb97186855767ea33beac683d6f955439db0503c45bb0e85423b429 not found: ID does not exist" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.122042 4842 scope.go:117] "RemoveContainer" containerID="c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8" Nov 11 14:00:00 crc kubenswrapper[4842]: E1111 14:00:00.128291 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8\": container with ID starting with c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8 not found: ID does not exist" containerID="c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.128395 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8"} err="failed to get container status \"c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8\": rpc error: code = NotFound desc = could not find container \"c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8\": container with ID starting with c25d05dafa911e6ac133de4066804c4b58d2383ddbdd58c8efbab328492199c8 not found: ID does not exist" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.128428 4842 scope.go:117] "RemoveContainer" containerID="a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.145566 4842 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.183466 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.195081 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: E1111 14:00:00.195646 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-api" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.195675 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-api" Nov 11 14:00:00 crc kubenswrapper[4842]: E1111 14:00:00.195705 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c794bc2f-a5a8-4ae5-836e-18367511637e" containerName="nova-scheduler-scheduler" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.195714 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c794bc2f-a5a8-4ae5-836e-18367511637e" containerName="nova-scheduler-scheduler" Nov 11 14:00:00 crc kubenswrapper[4842]: E1111 14:00:00.195755 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-log" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.195763 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-log" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.196000 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-api" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.196038 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" containerName="nova-api-log" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.196055 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c794bc2f-a5a8-4ae5-836e-18367511637e" containerName="nova-scheduler-scheduler" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.197444 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.198071 4842 scope.go:117] "RemoveContainer" containerID="a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" Nov 11 14:00:00 crc kubenswrapper[4842]: E1111 14:00:00.202418 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c\": container with ID starting with a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c not found: ID does not exist" containerID="a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.202469 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c"} err="failed to get container status \"a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c\": rpc error: code = NotFound desc = could not find container \"a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c\": container with ID starting with a2025730b521d6b53f3db1f85dd8faa999772dc0efe6539ba1e13bd3c45feb6c not found: ID does not exist" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.202996 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.215664 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.227180 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.238912 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.249609 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.251420 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.254333 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.262627 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.271543 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.272925 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.273997 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.274323 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.282688 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq"] Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344295 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-config-data\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344316 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c2vt\" (UniqueName: \"kubernetes.io/projected/4208c47d-d376-4eb3-9751-b19e6b672359-kube-api-access-2c2vt\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344360 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344390 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84kmn\" (UniqueName: \"kubernetes.io/projected/33473766-ea57-42af-8f97-f111ef36b159-kube-api-access-84kmn\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344419 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts7h4\" (UniqueName: \"kubernetes.io/projected/b32a9059-c9d3-441f-b0fb-a407403c905f-kube-api-access-ts7h4\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344481 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32a9059-c9d3-441f-b0fb-a407403c905f-logs\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " 
pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344516 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33473766-ea57-42af-8f97-f111ef36b159-config-volume\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344588 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33473766-ea57-42af-8f97-f111ef36b159-secret-volume\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.344782 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-config-data\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446528 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-config-data\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446576 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446598 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-config-data\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446613 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c2vt\" (UniqueName: \"kubernetes.io/projected/4208c47d-d376-4eb3-9751-b19e6b672359-kube-api-access-2c2vt\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446640 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446658 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84kmn\" (UniqueName: \"kubernetes.io/projected/33473766-ea57-42af-8f97-f111ef36b159-kube-api-access-84kmn\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc 
kubenswrapper[4842]: I1111 14:00:00.446681 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts7h4\" (UniqueName: \"kubernetes.io/projected/b32a9059-c9d3-441f-b0fb-a407403c905f-kube-api-access-ts7h4\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446714 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32a9059-c9d3-441f-b0fb-a407403c905f-logs\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446740 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33473766-ea57-42af-8f97-f111ef36b159-config-volume\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.446767 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33473766-ea57-42af-8f97-f111ef36b159-secret-volume\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.450297 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33473766-ea57-42af-8f97-f111ef36b159-config-volume\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.450852 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.450969 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-config-data\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.452060 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33473766-ea57-42af-8f97-f111ef36b159-secret-volume\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.452486 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-config-data\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.453883 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.456634 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32a9059-c9d3-441f-b0fb-a407403c905f-logs\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.464469 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84kmn\" (UniqueName: \"kubernetes.io/projected/33473766-ea57-42af-8f97-f111ef36b159-kube-api-access-84kmn\") pod \"collect-profiles-29381160-k85hq\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.466579 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c2vt\" (UniqueName: \"kubernetes.io/projected/4208c47d-d376-4eb3-9751-b19e6b672359-kube-api-access-2c2vt\") pod \"nova-scheduler-0\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.468039 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts7h4\" (UniqueName: \"kubernetes.io/projected/b32a9059-c9d3-441f-b0fb-a407403c905f-kube-api-access-ts7h4\") pod \"nova-api-0\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.521203 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.575596 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 14:00:00 crc kubenswrapper[4842]: I1111 14:00:00.593452 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:01 crc kubenswrapper[4842]: I1111 14:00:01.117207 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:01 crc kubenswrapper[4842]: I1111 14:00:01.182746 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq"] Nov 11 14:00:01 crc kubenswrapper[4842]: I1111 14:00:01.192636 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.071421 4842 generic.go:334] "Generic (PLEG): container finished" podID="33473766-ea57-42af-8f97-f111ef36b159" containerID="7dfd97f5b25ad997eb0875203e2b586cd0b3e9a4cabaa1b6f088b7512c3e2bc7" exitCode=0 Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.072781 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bb18f21-174c-4d4f-b984-aca9ae17ea76" path="/var/lib/kubelet/pods/2bb18f21-174c-4d4f-b984-aca9ae17ea76/volumes" Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.073373 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c794bc2f-a5a8-4ae5-836e-18367511637e" path="/var/lib/kubelet/pods/c794bc2f-a5a8-4ae5-836e-18367511637e/volumes" Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.073864 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b32a9059-c9d3-441f-b0fb-a407403c905f","Type":"ContainerStarted","Data":"eca63ed90cd311fbf4f31f860cb83f8f215f858e6db7fb6ec82d617b72accb3a"} Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.073888 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b32a9059-c9d3-441f-b0fb-a407403c905f","Type":"ContainerStarted","Data":"ed148a42c5cc75287233d48a11afa95339c3436d5af5f9b52066abe9ff121f30"} Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.073898 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b32a9059-c9d3-441f-b0fb-a407403c905f","Type":"ContainerStarted","Data":"8359b090d7344a19cbd2419d13c4259bac6d00df81bc48debbfdd4123a142327"} Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.073907 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" event={"ID":"33473766-ea57-42af-8f97-f111ef36b159","Type":"ContainerDied","Data":"7dfd97f5b25ad997eb0875203e2b586cd0b3e9a4cabaa1b6f088b7512c3e2bc7"} Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.073919 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" event={"ID":"33473766-ea57-42af-8f97-f111ef36b159","Type":"ContainerStarted","Data":"c09f7945b8f8f5a6e714b3e35b5519b788b7742da9696fb11fa5e8dae5608696"} Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.074048 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4208c47d-d376-4eb3-9751-b19e6b672359","Type":"ContainerStarted","Data":"3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5"} Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.074085 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4208c47d-d376-4eb3-9751-b19e6b672359","Type":"ContainerStarted","Data":"b10255fa046d88d36829cbcb6548ccca9469c73289e6626a5950a32984a1c44c"} Nov 11 14:00:02 crc 
kubenswrapper[4842]: I1111 14:00:02.094487 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.094466881 podStartE2EDuration="2.094466881s" podCreationTimestamp="2025-11-11 14:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:02.086859418 +0000 UTC m=+1812.747149037" watchObservedRunningTime="2025-11-11 14:00:02.094466881 +0000 UTC m=+1812.754756500" Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.106715 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.106696108 podStartE2EDuration="2.106696108s" podCreationTimestamp="2025-11-11 14:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:02.104873441 +0000 UTC m=+1812.765163060" watchObservedRunningTime="2025-11-11 14:00:02.106696108 +0000 UTC m=+1812.766985727" Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.422143 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 11 14:00:02 crc kubenswrapper[4842]: I1111 14:00:02.422502 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.061549 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:00:03 crc kubenswrapper[4842]: E1111 14:00:03.062288 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.442945 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.514046 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33473766-ea57-42af-8f97-f111ef36b159-config-volume\") pod \"33473766-ea57-42af-8f97-f111ef36b159\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.514203 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33473766-ea57-42af-8f97-f111ef36b159-secret-volume\") pod \"33473766-ea57-42af-8f97-f111ef36b159\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.514870 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33473766-ea57-42af-8f97-f111ef36b159-config-volume" (OuterVolumeSpecName: "config-volume") pod "33473766-ea57-42af-8f97-f111ef36b159" (UID: "33473766-ea57-42af-8f97-f111ef36b159"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.515315 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84kmn\" (UniqueName: \"kubernetes.io/projected/33473766-ea57-42af-8f97-f111ef36b159-kube-api-access-84kmn\") pod \"33473766-ea57-42af-8f97-f111ef36b159\" (UID: \"33473766-ea57-42af-8f97-f111ef36b159\") " Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.515733 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33473766-ea57-42af-8f97-f111ef36b159-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.520148 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33473766-ea57-42af-8f97-f111ef36b159-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "33473766-ea57-42af-8f97-f111ef36b159" (UID: "33473766-ea57-42af-8f97-f111ef36b159"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.520482 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33473766-ea57-42af-8f97-f111ef36b159-kube-api-access-84kmn" (OuterVolumeSpecName: "kube-api-access-84kmn") pod "33473766-ea57-42af-8f97-f111ef36b159" (UID: "33473766-ea57-42af-8f97-f111ef36b159"). InnerVolumeSpecName "kube-api-access-84kmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.617435 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84kmn\" (UniqueName: \"kubernetes.io/projected/33473766-ea57-42af-8f97-f111ef36b159-kube-api-access-84kmn\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:03 crc kubenswrapper[4842]: I1111 14:00:03.617454 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33473766-ea57-42af-8f97-f111ef36b159-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:04 crc kubenswrapper[4842]: I1111 14:00:04.091141 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" event={"ID":"33473766-ea57-42af-8f97-f111ef36b159","Type":"ContainerDied","Data":"c09f7945b8f8f5a6e714b3e35b5519b788b7742da9696fb11fa5e8dae5608696"} Nov 11 14:00:04 crc kubenswrapper[4842]: I1111 14:00:04.091198 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c09f7945b8f8f5a6e714b3e35b5519b788b7742da9696fb11fa5e8dae5608696" Nov 11 14:00:04 crc kubenswrapper[4842]: I1111 14:00:04.091209 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq" Nov 11 14:00:05 crc kubenswrapper[4842]: I1111 14:00:05.318806 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 11 14:00:05 crc kubenswrapper[4842]: I1111 14:00:05.576559 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 11 14:00:07 crc kubenswrapper[4842]: I1111 14:00:07.421398 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 11 14:00:07 crc kubenswrapper[4842]: I1111 14:00:07.421457 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 11 14:00:08 crc kubenswrapper[4842]: I1111 14:00:08.437287 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.211:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:08 crc kubenswrapper[4842]: I1111 14:00:08.437563 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.211:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:08 crc kubenswrapper[4842]: I1111 14:00:08.461690 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 11 14:00:08 crc kubenswrapper[4842]: I1111 14:00:08.949059 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 14:00:08 crc kubenswrapper[4842]: I1111 14:00:08.949744 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="dc76c919-6a74-4be1-8142-3200604d22aa" containerName="kube-state-metrics" containerID="cri-o://a50bb1991daefd6c457e78d2e251872e382df15bcdd439d2bc80b69721300ca8" gracePeriod=30 Nov 11 14:00:09 crc kubenswrapper[4842]: I1111 14:00:09.193291 4842 generic.go:334] "Generic (PLEG): container finished" podID="dc76c919-6a74-4be1-8142-3200604d22aa" containerID="a50bb1991daefd6c457e78d2e251872e382df15bcdd439d2bc80b69721300ca8" exitCode=2 Nov 11 14:00:09 crc kubenswrapper[4842]: I1111 14:00:09.193405 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc76c919-6a74-4be1-8142-3200604d22aa","Type":"ContainerDied","Data":"a50bb1991daefd6c457e78d2e251872e382df15bcdd439d2bc80b69721300ca8"} Nov 11 14:00:09 crc kubenswrapper[4842]: I1111 14:00:09.554071 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 11 14:00:09 crc kubenswrapper[4842]: I1111 14:00:09.631446 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2v2z\" (UniqueName: \"kubernetes.io/projected/dc76c919-6a74-4be1-8142-3200604d22aa-kube-api-access-c2v2z\") pod \"dc76c919-6a74-4be1-8142-3200604d22aa\" (UID: \"dc76c919-6a74-4be1-8142-3200604d22aa\") " Nov 11 14:00:09 crc kubenswrapper[4842]: I1111 14:00:09.658074 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc76c919-6a74-4be1-8142-3200604d22aa-kube-api-access-c2v2z" (OuterVolumeSpecName: "kube-api-access-c2v2z") pod "dc76c919-6a74-4be1-8142-3200604d22aa" (UID: "dc76c919-6a74-4be1-8142-3200604d22aa"). InnerVolumeSpecName "kube-api-access-c2v2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:09 crc kubenswrapper[4842]: I1111 14:00:09.734190 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2v2z\" (UniqueName: \"kubernetes.io/projected/dc76c919-6a74-4be1-8142-3200604d22aa-kube-api-access-c2v2z\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.205866 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc76c919-6a74-4be1-8142-3200604d22aa","Type":"ContainerDied","Data":"ba4d6af5b001dec4bef4b626256d86cfb01d8c5842a79138dc1be76b5040c24f"} Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.205917 4842 scope.go:117] "RemoveContainer" containerID="a50bb1991daefd6c457e78d2e251872e382df15bcdd439d2bc80b69721300ca8" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.206235 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.281917 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.296570 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.311026 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 14:00:10 crc kubenswrapper[4842]: E1111 14:00:10.312002 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33473766-ea57-42af-8f97-f111ef36b159" containerName="collect-profiles" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.312020 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="33473766-ea57-42af-8f97-f111ef36b159" containerName="collect-profiles" Nov 11 14:00:10 crc kubenswrapper[4842]: E1111 14:00:10.312037 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc76c919-6a74-4be1-8142-3200604d22aa" containerName="kube-state-metrics" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.312043 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc76c919-6a74-4be1-8142-3200604d22aa" containerName="kube-state-metrics" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.312350 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="33473766-ea57-42af-8f97-f111ef36b159" containerName="collect-profiles" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.312371 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc76c919-6a74-4be1-8142-3200604d22aa" containerName="kube-state-metrics" Nov 11 14:00:10 crc 
kubenswrapper[4842]: I1111 14:00:10.313083 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.315199 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.315267 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.319872 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.464242 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.464451 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.464728 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.464821 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8525v\" (UniqueName: \"kubernetes.io/projected/5f2c2abf-63ce-49fa-a178-57088955a295-kube-api-access-8525v\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.522314 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.522389 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.566619 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.566685 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8525v\" (UniqueName: \"kubernetes.io/projected/5f2c2abf-63ce-49fa-a178-57088955a295-kube-api-access-8525v\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.566765 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.566812 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.573939 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.575639 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.575902 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.582997 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f2c2abf-63ce-49fa-a178-57088955a295-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.585757 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8525v\" (UniqueName: \"kubernetes.io/projected/5f2c2abf-63ce-49fa-a178-57088955a295-kube-api-access-8525v\") pod \"kube-state-metrics-0\" (UID: \"5f2c2abf-63ce-49fa-a178-57088955a295\") " pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.614232 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.633184 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.975353 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.975911 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-central-agent" containerID="cri-o://ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6" gracePeriod=30 Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.975992 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="proxy-httpd" containerID="cri-o://70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc" gracePeriod=30 Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.976032 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="sg-core" containerID="cri-o://f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b" gracePeriod=30 Nov 11 14:00:10 crc kubenswrapper[4842]: I1111 14:00:10.976054 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-notification-agent" containerID="cri-o://0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990" gracePeriod=30 Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.120872 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.217026 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5f2c2abf-63ce-49fa-a178-57088955a295","Type":"ContainerStarted","Data":"f32e585459e95ce69b931bb0c21eca4cbc9e23e075a59d331177d5ae72ba1ae5"} Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.219441 4842 generic.go:334] "Generic (PLEG): container finished" podID="8d9e1445-577e-4b35-9613-f513b45b3500" containerID="70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc" exitCode=0 Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.219463 4842 generic.go:334] "Generic (PLEG): container finished" podID="8d9e1445-577e-4b35-9613-f513b45b3500" containerID="f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b" exitCode=2 Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.220321 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerDied","Data":"70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc"} Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.220345 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerDied","Data":"f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b"} Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.256678 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.604349 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" 
containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.213:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:11 crc kubenswrapper[4842]: I1111 14:00:11.604582 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.213:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:12 crc kubenswrapper[4842]: I1111 14:00:12.069997 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc76c919-6a74-4be1-8142-3200604d22aa" path="/var/lib/kubelet/pods/dc76c919-6a74-4be1-8142-3200604d22aa/volumes" Nov 11 14:00:12 crc kubenswrapper[4842]: I1111 14:00:12.234497 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5f2c2abf-63ce-49fa-a178-57088955a295","Type":"ContainerStarted","Data":"ba0e823817fb11c56823cb4e5c49b7a9d63ec343490a7f2a37171fecf8b97212"} Nov 11 14:00:12 crc kubenswrapper[4842]: I1111 14:00:12.235809 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 11 14:00:12 crc kubenswrapper[4842]: I1111 14:00:12.241705 4842 generic.go:334] "Generic (PLEG): container finished" podID="8d9e1445-577e-4b35-9613-f513b45b3500" containerID="ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6" exitCode=0 Nov 11 14:00:12 crc kubenswrapper[4842]: I1111 14:00:12.242205 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerDied","Data":"ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6"} Nov 11 14:00:12 crc kubenswrapper[4842]: I1111 14:00:12.263044 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.917216802 podStartE2EDuration="2.263022774s" podCreationTimestamp="2025-11-11 14:00:10 +0000 UTC" firstStartedPulling="2025-11-11 14:00:11.12869402 +0000 UTC m=+1821.788983639" lastFinishedPulling="2025-11-11 14:00:11.474499992 +0000 UTC m=+1822.134789611" observedRunningTime="2025-11-11 14:00:12.259882026 +0000 UTC m=+1822.920171645" watchObservedRunningTime="2025-11-11 14:00:12.263022774 +0000 UTC m=+1822.923312393" Nov 11 14:00:14 crc kubenswrapper[4842]: I1111 14:00:14.521038 4842 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="dc76c919-6a74-4be1-8142-3200604d22aa" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:16 crc kubenswrapper[4842]: I1111 14:00:16.060371 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:00:16 crc kubenswrapper[4842]: E1111 14:00:16.060994 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:00:17 crc kubenswrapper[4842]: I1111 14:00:17.427358 4842 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 11 14:00:17 crc kubenswrapper[4842]: I1111 14:00:17.429591 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 11 14:00:17 crc kubenswrapper[4842]: I1111 14:00:17.435251 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 11 14:00:17 crc kubenswrapper[4842]: W1111 14:00:17.889666 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod759e0148_3900_4e1d_b990_de5f3b5fc610.slice/crio-dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8.scope WatchSource:0}: Error finding container dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8: Status 404 returned error can't find the container with id dc85e946bf5bbd7d8beb237b5c186a3a163fb6957b6aedcd0cfbd866063227f8 Nov 11 14:00:17 crc kubenswrapper[4842]: W1111 14:00:17.907158 4842 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33473766_ea57_42af_8f97_f111ef36b159.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33473766_ea57_42af_8f97_f111ef36b159.slice: no such file or directory Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.041920 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.216303 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-scripts\") pod \"8d9e1445-577e-4b35-9613-f513b45b3500\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.216908 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5slpg\" (UniqueName: \"kubernetes.io/projected/8d9e1445-577e-4b35-9613-f513b45b3500-kube-api-access-5slpg\") pod \"8d9e1445-577e-4b35-9613-f513b45b3500\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.217129 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-run-httpd\") pod \"8d9e1445-577e-4b35-9613-f513b45b3500\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.217265 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-combined-ca-bundle\") pod \"8d9e1445-577e-4b35-9613-f513b45b3500\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.217369 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-sg-core-conf-yaml\") pod \"8d9e1445-577e-4b35-9613-f513b45b3500\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.217464 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-config-data\") pod \"8d9e1445-577e-4b35-9613-f513b45b3500\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.217601 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-log-httpd\") pod \"8d9e1445-577e-4b35-9613-f513b45b3500\" (UID: \"8d9e1445-577e-4b35-9613-f513b45b3500\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.220386 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8d9e1445-577e-4b35-9613-f513b45b3500" (UID: "8d9e1445-577e-4b35-9613-f513b45b3500"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.223455 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8d9e1445-577e-4b35-9613-f513b45b3500" (UID: "8d9e1445-577e-4b35-9613-f513b45b3500"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.224953 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-scripts" (OuterVolumeSpecName: "scripts") pod "8d9e1445-577e-4b35-9613-f513b45b3500" (UID: "8d9e1445-577e-4b35-9613-f513b45b3500"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.229268 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d9e1445-577e-4b35-9613-f513b45b3500-kube-api-access-5slpg" (OuterVolumeSpecName: "kube-api-access-5slpg") pod "8d9e1445-577e-4b35-9613-f513b45b3500" (UID: "8d9e1445-577e-4b35-9613-f513b45b3500"). InnerVolumeSpecName "kube-api-access-5slpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.249063 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.251156 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8d9e1445-577e-4b35-9613-f513b45b3500" (UID: "8d9e1445-577e-4b35-9613-f513b45b3500"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.303936 4842 generic.go:334] "Generic (PLEG): container finished" podID="2d5e92bc-1b70-455d-9397-985e2b92f0e0" containerID="837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec" exitCode=137 Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.304057 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2d5e92bc-1b70-455d-9397-985e2b92f0e0","Type":"ContainerDied","Data":"837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec"} Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.304085 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2d5e92bc-1b70-455d-9397-985e2b92f0e0","Type":"ContainerDied","Data":"09b59231212636ac41f1f3ab7d3ce163fb9de4fe1a8930d4c903f9aca8680dca"} Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.304115 4842 scope.go:117] "RemoveContainer" containerID="837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.304271 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.314788 4842 generic.go:334] "Generic (PLEG): container finished" podID="8d9e1445-577e-4b35-9613-f513b45b3500" containerID="0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990" exitCode=0 Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.315124 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.316045 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerDied","Data":"0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990"} Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.316086 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d9e1445-577e-4b35-9613-f513b45b3500","Type":"ContainerDied","Data":"a191cbd7fdf7a76f5ca6ffc4da37583756d823e7387b9f71b8ff0a2cced8b9c3"} Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.316837 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d9e1445-577e-4b35-9613-f513b45b3500" (UID: "8d9e1445-577e-4b35-9613-f513b45b3500"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.319834 4842 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.319862 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.319878 4842 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.319889 4842 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d9e1445-577e-4b35-9613-f513b45b3500-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.319902 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.319914 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5slpg\" (UniqueName: \"kubernetes.io/projected/8d9e1445-577e-4b35-9613-f513b45b3500-kube-api-access-5slpg\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.325236 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.335348 4842 scope.go:117] "RemoveContainer" containerID="837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.335910 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec\": container with ID starting with 837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec not found: ID does not exist" containerID="837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.336091 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec"} err="failed to get container status \"837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec\": rpc error: code = NotFound desc = could not find container \"837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec\": container with ID starting with 837cb6b9bb64dfbb5b6e29d12901b0017fbd9a5a74c6b70f32b347d0d18c6eec not found: ID does not exist" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.336180 4842 scope.go:117] "RemoveContainer" containerID="70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.373315 4842 scope.go:117] "RemoveContainer" containerID="f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.390280 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-config-data" (OuterVolumeSpecName: "config-data") pod "8d9e1445-577e-4b35-9613-f513b45b3500" (UID: "8d9e1445-577e-4b35-9613-f513b45b3500"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.401010 4842 scope.go:117] "RemoveContainer" containerID="0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.423418 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npsls\" (UniqueName: \"kubernetes.io/projected/2d5e92bc-1b70-455d-9397-985e2b92f0e0-kube-api-access-npsls\") pod \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.423678 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-config-data\") pod \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.423704 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-combined-ca-bundle\") pod \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\" (UID: \"2d5e92bc-1b70-455d-9397-985e2b92f0e0\") " Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.424198 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d9e1445-577e-4b35-9613-f513b45b3500-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.427229 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d5e92bc-1b70-455d-9397-985e2b92f0e0-kube-api-access-npsls" (OuterVolumeSpecName: "kube-api-access-npsls") pod "2d5e92bc-1b70-455d-9397-985e2b92f0e0" (UID: "2d5e92bc-1b70-455d-9397-985e2b92f0e0"). InnerVolumeSpecName "kube-api-access-npsls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.435311 4842 scope.go:117] "RemoveContainer" containerID="ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.453421 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d5e92bc-1b70-455d-9397-985e2b92f0e0" (UID: "2d5e92bc-1b70-455d-9397-985e2b92f0e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.455069 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-config-data" (OuterVolumeSpecName: "config-data") pod "2d5e92bc-1b70-455d-9397-985e2b92f0e0" (UID: "2d5e92bc-1b70-455d-9397-985e2b92f0e0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.460364 4842 scope.go:117] "RemoveContainer" containerID="70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.461960 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc\": container with ID starting with 70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc not found: ID does not exist" containerID="70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.462081 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc"} err="failed to get container status \"70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc\": rpc error: code = NotFound desc = could not find container \"70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc\": container with ID starting with 70e7eae685c645e1dfff4adafe47839eec8bcfa4db16006acf27a3e1af94febc not found: ID does not exist" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.462220 4842 scope.go:117] "RemoveContainer" containerID="f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.462814 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b\": container with ID starting with f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b not found: ID does not exist" containerID="f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.463051 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b"} err="failed to get container status \"f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b\": rpc error: code = NotFound desc = could not find container \"f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b\": container with ID starting with f3d7af0caf512009bb805e3fcbbc5f4582e4815d939ca22744f2203d6472433b not found: ID does not exist" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.463180 4842 scope.go:117] "RemoveContainer" containerID="0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.464395 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990\": container with ID starting with 0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990 not found: ID does not exist" containerID="0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.464558 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990"} err="failed to get container status \"0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990\": rpc error: code = NotFound desc = could not 
find container \"0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990\": container with ID starting with 0f9d292cdaff0b849b2f964ebef30afe27273ec07b9363d0d3f315e50639b990 not found: ID does not exist" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.464649 4842 scope.go:117] "RemoveContainer" containerID="ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.465079 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6\": container with ID starting with ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6 not found: ID does not exist" containerID="ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.465271 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6"} err="failed to get container status \"ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6\": rpc error: code = NotFound desc = could not find container \"ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6\": container with ID starting with ce9c7e3bcb413a436640f708ef8ae946adf1c5aa4d9051b5f5bf9058941642b6 not found: ID does not exist" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.525846 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.525878 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5e92bc-1b70-455d-9397-985e2b92f0e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.525889 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npsls\" (UniqueName: \"kubernetes.io/projected/2d5e92bc-1b70-455d-9397-985e2b92f0e0-kube-api-access-npsls\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.689604 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.699354 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.708574 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.718722 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.733624 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.734329 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-central-agent" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734345 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-central-agent" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.734356 4842 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="2d5e92bc-1b70-455d-9397-985e2b92f0e0" containerName="nova-cell1-novncproxy-novncproxy" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734363 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d5e92bc-1b70-455d-9397-985e2b92f0e0" containerName="nova-cell1-novncproxy-novncproxy" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.734377 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="sg-core" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734383 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="sg-core" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.734398 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-notification-agent" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734404 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-notification-agent" Nov 11 14:00:18 crc kubenswrapper[4842]: E1111 14:00:18.734413 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="proxy-httpd" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734420 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="proxy-httpd" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734640 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="sg-core" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734656 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-notification-agent" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734671 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="ceilometer-central-agent" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734683 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" containerName="proxy-httpd" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.734690 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d5e92bc-1b70-455d-9397-985e2b92f0e0" containerName="nova-cell1-novncproxy-novncproxy" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.736512 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.743700 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.744976 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.745202 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.745419 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.745617 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.748733 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.748780 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.748976 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.755020 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.762037 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.832925 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-log-httpd\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.833243 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.833432 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.833550 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-config-data\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.833630 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sns7m\" (UniqueName: \"kubernetes.io/projected/afca2368-3010-4711-b4d3-d0a65793f8d5-kube-api-access-sns7m\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.833849 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.833931 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-run-httpd\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.834048 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6nn9\" (UniqueName: \"kubernetes.io/projected/96d33b5d-2ce7-480d-8e48-1badc4624a2f-kube-api-access-w6nn9\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.834155 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.834279 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-scripts\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.834358 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.834430 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.834531 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936389 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936466 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-log-httpd\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936504 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936535 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936568 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sns7m\" (UniqueName: \"kubernetes.io/projected/afca2368-3010-4711-b4d3-d0a65793f8d5-kube-api-access-sns7m\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936585 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-config-data\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936601 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936626 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-run-httpd\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936671 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6nn9\" (UniqueName: \"kubernetes.io/projected/96d33b5d-2ce7-480d-8e48-1badc4624a2f-kube-api-access-w6nn9\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936688 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936723 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-scripts\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936750 4842 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.936777 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.937821 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-run-httpd\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.937846 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-log-httpd\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.941981 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.943219 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.943463 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-config-data\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.943898 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.945489 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.946015 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-scripts\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.949636 4842 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.949700 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.953859 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/96d33b5d-2ce7-480d-8e48-1badc4624a2f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.957966 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6nn9\" (UniqueName: \"kubernetes.io/projected/96d33b5d-2ce7-480d-8e48-1badc4624a2f-kube-api-access-w6nn9\") pod \"nova-cell1-novncproxy-0\" (UID: \"96d33b5d-2ce7-480d-8e48-1badc4624a2f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:18 crc kubenswrapper[4842]: I1111 14:00:18.958987 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sns7m\" (UniqueName: \"kubernetes.io/projected/afca2368-3010-4711-b4d3-d0a65793f8d5-kube-api-access-sns7m\") pod \"ceilometer-0\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " pod="openstack/ceilometer-0" Nov 11 14:00:19 crc kubenswrapper[4842]: I1111 14:00:19.055006 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:19 crc kubenswrapper[4842]: I1111 14:00:19.072150 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:19 crc kubenswrapper[4842]: W1111 14:00:19.510502 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podafca2368_3010_4711_b4d3_d0a65793f8d5.slice/crio-4901da82848a4b1d11b888743d99d67696acb5b165ddfcc2fdd65225d77cdcb3 WatchSource:0}: Error finding container 4901da82848a4b1d11b888743d99d67696acb5b165ddfcc2fdd65225d77cdcb3: Status 404 returned error can't find the container with id 4901da82848a4b1d11b888743d99d67696acb5b165ddfcc2fdd65225d77cdcb3 Nov 11 14:00:19 crc kubenswrapper[4842]: I1111 14:00:19.515326 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:19 crc kubenswrapper[4842]: I1111 14:00:19.610935 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 11 14:00:19 crc kubenswrapper[4842]: W1111 14:00:19.614847 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96d33b5d_2ce7_480d_8e48_1badc4624a2f.slice/crio-2dffcb0ff74a1ff373c31b24a29d1eae75092a55f878548ad2bcb0b78f2d26b3 WatchSource:0}: Error finding container 2dffcb0ff74a1ff373c31b24a29d1eae75092a55f878548ad2bcb0b78f2d26b3: Status 404 returned error can't find the container with id 2dffcb0ff74a1ff373c31b24a29d1eae75092a55f878548ad2bcb0b78f2d26b3 Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.082453 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d5e92bc-1b70-455d-9397-985e2b92f0e0" path="/var/lib/kubelet/pods/2d5e92bc-1b70-455d-9397-985e2b92f0e0/volumes" Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.088272 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d9e1445-577e-4b35-9613-f513b45b3500" path="/var/lib/kubelet/pods/8d9e1445-577e-4b35-9613-f513b45b3500/volumes" Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.354967 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerStarted","Data":"336fa7b76f8e201abcaa2d1d85684447b84b0eea27a3902796b14dec4f93684d"} Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.355364 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerStarted","Data":"a4f66bb6691459a96784c0990772dce81b703d002db41ff5a7cd2e401b5fb2e1"} Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.355376 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerStarted","Data":"4901da82848a4b1d11b888743d99d67696acb5b165ddfcc2fdd65225d77cdcb3"} Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.358534 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"96d33b5d-2ce7-480d-8e48-1badc4624a2f","Type":"ContainerStarted","Data":"6891c380d38d3ab9e31e61a7298e074145d4c384e695276d1062061aee64b321"} Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.358568 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"96d33b5d-2ce7-480d-8e48-1badc4624a2f","Type":"ContainerStarted","Data":"2dffcb0ff74a1ff373c31b24a29d1eae75092a55f878548ad2bcb0b78f2d26b3"} Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.381935 4842 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.381916263 podStartE2EDuration="2.381916263s" podCreationTimestamp="2025-11-11 14:00:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:20.371691174 +0000 UTC m=+1831.031980803" watchObservedRunningTime="2025-11-11 14:00:20.381916263 +0000 UTC m=+1831.042205882" Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.529961 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.530493 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.533674 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.535728 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 11 14:00:20 crc kubenswrapper[4842]: I1111 14:00:20.643082 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.369491 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerStarted","Data":"65d601b8012f3b05a779f601cd1eb4ca7227dda474b73c950553c928fa1d6854"} Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.369773 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.376031 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.575289 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5555bddbdf-g5rkh"] Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.577432 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.594846 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5555bddbdf-g5rkh"] Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.688339 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-svc\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.688476 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-config\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.688521 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-nb\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.688619 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfdzt\" (UniqueName: \"kubernetes.io/projected/aa32bfef-1280-45cd-bcc7-abaa0485335a-kube-api-access-zfdzt\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.688689 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-swift-storage-0\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.688733 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-sb\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.790370 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-config\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.790628 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-nb\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.790700 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-zfdzt\" (UniqueName: \"kubernetes.io/projected/aa32bfef-1280-45cd-bcc7-abaa0485335a-kube-api-access-zfdzt\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.790753 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-swift-storage-0\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.790787 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-sb\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.790840 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-svc\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.791719 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-svc\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.791839 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-config\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.792331 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-nb\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.793128 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-sb\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.793226 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-swift-storage-0\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.820923 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfdzt\" (UniqueName: 
\"kubernetes.io/projected/aa32bfef-1280-45cd-bcc7-abaa0485335a-kube-api-access-zfdzt\") pod \"dnsmasq-dns-5555bddbdf-g5rkh\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:21 crc kubenswrapper[4842]: I1111 14:00:21.902337 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:22 crc kubenswrapper[4842]: I1111 14:00:22.401753 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5555bddbdf-g5rkh"] Nov 11 14:00:22 crc kubenswrapper[4842]: W1111 14:00:22.407488 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa32bfef_1280_45cd_bcc7_abaa0485335a.slice/crio-76a47178cd53d2601a44a9ba36bd5d1adb4bdaaffef3a0f7d920c744dbe56115 WatchSource:0}: Error finding container 76a47178cd53d2601a44a9ba36bd5d1adb4bdaaffef3a0f7d920c744dbe56115: Status 404 returned error can't find the container with id 76a47178cd53d2601a44a9ba36bd5d1adb4bdaaffef3a0f7d920c744dbe56115 Nov 11 14:00:23 crc kubenswrapper[4842]: I1111 14:00:23.388369 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerStarted","Data":"1b14edb2f420ff0eb14e555d48a36e416dfe60cabe83ab5d32936a6b35fd0b8e"} Nov 11 14:00:23 crc kubenswrapper[4842]: I1111 14:00:23.388976 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 11 14:00:23 crc kubenswrapper[4842]: I1111 14:00:23.391166 4842 generic.go:334] "Generic (PLEG): container finished" podID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerID="49f84e3057d46c74c17574d89d867a97437dd38f7817c8d9b0272214ba4d06a1" exitCode=0 Nov 11 14:00:23 crc kubenswrapper[4842]: I1111 14:00:23.391269 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" event={"ID":"aa32bfef-1280-45cd-bcc7-abaa0485335a","Type":"ContainerDied","Data":"49f84e3057d46c74c17574d89d867a97437dd38f7817c8d9b0272214ba4d06a1"} Nov 11 14:00:23 crc kubenswrapper[4842]: I1111 14:00:23.391318 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" event={"ID":"aa32bfef-1280-45cd-bcc7-abaa0485335a","Type":"ContainerStarted","Data":"76a47178cd53d2601a44a9ba36bd5d1adb4bdaaffef3a0f7d920c744dbe56115"} Nov 11 14:00:23 crc kubenswrapper[4842]: I1111 14:00:23.418669 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.374593006 podStartE2EDuration="5.418649087s" podCreationTimestamp="2025-11-11 14:00:18 +0000 UTC" firstStartedPulling="2025-11-11 14:00:19.512706491 +0000 UTC m=+1830.172996110" lastFinishedPulling="2025-11-11 14:00:22.556762572 +0000 UTC m=+1833.217052191" observedRunningTime="2025-11-11 14:00:23.414255292 +0000 UTC m=+1834.074544911" watchObservedRunningTime="2025-11-11 14:00:23.418649087 +0000 UTC m=+1834.078938706" Nov 11 14:00:24 crc kubenswrapper[4842]: I1111 14:00:24.072814 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:24 crc kubenswrapper[4842]: I1111 14:00:24.204961 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:24 crc kubenswrapper[4842]: I1111 14:00:24.402027 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" 
event={"ID":"aa32bfef-1280-45cd-bcc7-abaa0485335a","Type":"ContainerStarted","Data":"7caa17dfc7ebb2b563b83027afcc4dd6733af872342a0a89ffdbe5416b53cefa"} Nov 11 14:00:24 crc kubenswrapper[4842]: I1111 14:00:24.424744 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" podStartSLOduration=3.424719827 podStartE2EDuration="3.424719827s" podCreationTimestamp="2025-11-11 14:00:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:24.421859996 +0000 UTC m=+1835.082149625" watchObservedRunningTime="2025-11-11 14:00:24.424719827 +0000 UTC m=+1835.085009446" Nov 11 14:00:24 crc kubenswrapper[4842]: I1111 14:00:24.502433 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:24 crc kubenswrapper[4842]: I1111 14:00:24.502879 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-log" containerID="cri-o://ed148a42c5cc75287233d48a11afa95339c3436d5af5f9b52066abe9ff121f30" gracePeriod=30 Nov 11 14:00:24 crc kubenswrapper[4842]: I1111 14:00:24.503013 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-api" containerID="cri-o://eca63ed90cd311fbf4f31f860cb83f8f215f858e6db7fb6ec82d617b72accb3a" gracePeriod=30 Nov 11 14:00:25 crc kubenswrapper[4842]: I1111 14:00:25.434564 4842 generic.go:334] "Generic (PLEG): container finished" podID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerID="ed148a42c5cc75287233d48a11afa95339c3436d5af5f9b52066abe9ff121f30" exitCode=143 Nov 11 14:00:25 crc kubenswrapper[4842]: I1111 14:00:25.435853 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b32a9059-c9d3-441f-b0fb-a407403c905f","Type":"ContainerDied","Data":"ed148a42c5cc75287233d48a11afa95339c3436d5af5f9b52066abe9ff121f30"} Nov 11 14:00:25 crc kubenswrapper[4842]: I1111 14:00:25.435888 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:25 crc kubenswrapper[4842]: I1111 14:00:25.436053 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="ceilometer-central-agent" containerID="cri-o://a4f66bb6691459a96784c0990772dce81b703d002db41ff5a7cd2e401b5fb2e1" gracePeriod=30 Nov 11 14:00:25 crc kubenswrapper[4842]: I1111 14:00:25.436545 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="proxy-httpd" containerID="cri-o://1b14edb2f420ff0eb14e555d48a36e416dfe60cabe83ab5d32936a6b35fd0b8e" gracePeriod=30 Nov 11 14:00:25 crc kubenswrapper[4842]: I1111 14:00:25.436595 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="sg-core" containerID="cri-o://65d601b8012f3b05a779f601cd1eb4ca7227dda474b73c950553c928fa1d6854" gracePeriod=30 Nov 11 14:00:25 crc kubenswrapper[4842]: I1111 14:00:25.436628 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" 
containerName="ceilometer-notification-agent" containerID="cri-o://336fa7b76f8e201abcaa2d1d85684447b84b0eea27a3902796b14dec4f93684d" gracePeriod=30 Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.455312 4842 generic.go:334] "Generic (PLEG): container finished" podID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerID="eca63ed90cd311fbf4f31f860cb83f8f215f858e6db7fb6ec82d617b72accb3a" exitCode=0 Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.455459 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b32a9059-c9d3-441f-b0fb-a407403c905f","Type":"ContainerDied","Data":"eca63ed90cd311fbf4f31f860cb83f8f215f858e6db7fb6ec82d617b72accb3a"} Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.458470 4842 generic.go:334] "Generic (PLEG): container finished" podID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerID="1b14edb2f420ff0eb14e555d48a36e416dfe60cabe83ab5d32936a6b35fd0b8e" exitCode=0 Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.458504 4842 generic.go:334] "Generic (PLEG): container finished" podID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerID="65d601b8012f3b05a779f601cd1eb4ca7227dda474b73c950553c928fa1d6854" exitCode=2 Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.458512 4842 generic.go:334] "Generic (PLEG): container finished" podID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerID="336fa7b76f8e201abcaa2d1d85684447b84b0eea27a3902796b14dec4f93684d" exitCode=0 Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.459664 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerDied","Data":"1b14edb2f420ff0eb14e555d48a36e416dfe60cabe83ab5d32936a6b35fd0b8e"} Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.459692 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerDied","Data":"65d601b8012f3b05a779f601cd1eb4ca7227dda474b73c950553c928fa1d6854"} Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.459703 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerDied","Data":"336fa7b76f8e201abcaa2d1d85684447b84b0eea27a3902796b14dec4f93684d"} Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.649482 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.791434 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-combined-ca-bundle\") pod \"b32a9059-c9d3-441f-b0fb-a407403c905f\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.791526 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ts7h4\" (UniqueName: \"kubernetes.io/projected/b32a9059-c9d3-441f-b0fb-a407403c905f-kube-api-access-ts7h4\") pod \"b32a9059-c9d3-441f-b0fb-a407403c905f\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.791761 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-config-data\") pod \"b32a9059-c9d3-441f-b0fb-a407403c905f\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.791800 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32a9059-c9d3-441f-b0fb-a407403c905f-logs\") pod \"b32a9059-c9d3-441f-b0fb-a407403c905f\" (UID: \"b32a9059-c9d3-441f-b0fb-a407403c905f\") " Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.792779 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b32a9059-c9d3-441f-b0fb-a407403c905f-logs" (OuterVolumeSpecName: "logs") pod "b32a9059-c9d3-441f-b0fb-a407403c905f" (UID: "b32a9059-c9d3-441f-b0fb-a407403c905f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.800489 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b32a9059-c9d3-441f-b0fb-a407403c905f-kube-api-access-ts7h4" (OuterVolumeSpecName: "kube-api-access-ts7h4") pod "b32a9059-c9d3-441f-b0fb-a407403c905f" (UID: "b32a9059-c9d3-441f-b0fb-a407403c905f"). InnerVolumeSpecName "kube-api-access-ts7h4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.825130 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b32a9059-c9d3-441f-b0fb-a407403c905f" (UID: "b32a9059-c9d3-441f-b0fb-a407403c905f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.833281 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-config-data" (OuterVolumeSpecName: "config-data") pod "b32a9059-c9d3-441f-b0fb-a407403c905f" (UID: "b32a9059-c9d3-441f-b0fb-a407403c905f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.898161 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.898229 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b32a9059-c9d3-441f-b0fb-a407403c905f-logs\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.898240 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b32a9059-c9d3-441f-b0fb-a407403c905f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:26 crc kubenswrapper[4842]: I1111 14:00:26.898251 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ts7h4\" (UniqueName: \"kubernetes.io/projected/b32a9059-c9d3-441f-b0fb-a407403c905f-kube-api-access-ts7h4\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.470778 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b32a9059-c9d3-441f-b0fb-a407403c905f","Type":"ContainerDied","Data":"8359b090d7344a19cbd2419d13c4259bac6d00df81bc48debbfdd4123a142327"} Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.470843 4842 scope.go:117] "RemoveContainer" containerID="eca63ed90cd311fbf4f31f860cb83f8f215f858e6db7fb6ec82d617b72accb3a" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.470856 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.503461 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.505891 4842 scope.go:117] "RemoveContainer" containerID="ed148a42c5cc75287233d48a11afa95339c3436d5af5f9b52066abe9ff121f30" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.511388 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.529398 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:27 crc kubenswrapper[4842]: E1111 14:00:27.529983 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-log" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.530004 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-log" Nov 11 14:00:27 crc kubenswrapper[4842]: E1111 14:00:27.530050 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-api" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.530056 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-api" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.530271 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" containerName="nova-api-log" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.530292 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" 
containerName="nova-api-api" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.531494 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.533180 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.533180 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.533947 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.557992 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.613412 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-public-tls-certs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.614073 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztql6\" (UniqueName: \"kubernetes.io/projected/49c5bc79-a738-4ac7-9efd-bc335a8ef954-kube-api-access-ztql6\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.614325 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-config-data\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.614380 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-internal-tls-certs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.614553 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c5bc79-a738-4ac7-9efd-bc335a8ef954-logs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.614615 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.716495 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztql6\" (UniqueName: \"kubernetes.io/projected/49c5bc79-a738-4ac7-9efd-bc335a8ef954-kube-api-access-ztql6\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.716591 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-config-data\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.716618 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-internal-tls-certs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.716677 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c5bc79-a738-4ac7-9efd-bc335a8ef954-logs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.716711 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.716753 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-public-tls-certs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.717559 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c5bc79-a738-4ac7-9efd-bc335a8ef954-logs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.721758 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-public-tls-certs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.725081 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-internal-tls-certs\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.726887 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.728157 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-config-data\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.734646 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztql6\" (UniqueName: 
\"kubernetes.io/projected/49c5bc79-a738-4ac7-9efd-bc335a8ef954-kube-api-access-ztql6\") pod \"nova-api-0\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " pod="openstack/nova-api-0" Nov 11 14:00:27 crc kubenswrapper[4842]: I1111 14:00:27.849535 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:28 crc kubenswrapper[4842]: I1111 14:00:28.078583 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b32a9059-c9d3-441f-b0fb-a407403c905f" path="/var/lib/kubelet/pods/b32a9059-c9d3-441f-b0fb-a407403c905f/volumes" Nov 11 14:00:28 crc kubenswrapper[4842]: I1111 14:00:28.327970 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:28 crc kubenswrapper[4842]: I1111 14:00:28.482161 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49c5bc79-a738-4ac7-9efd-bc335a8ef954","Type":"ContainerStarted","Data":"ed057205b013c46bd5ebd1dc7b79d1ae49624d9802ac52680823033990939c03"} Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.059478 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:00:29 crc kubenswrapper[4842]: E1111 14:00:29.059746 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.072769 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.091876 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.493051 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49c5bc79-a738-4ac7-9efd-bc335a8ef954","Type":"ContainerStarted","Data":"b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66"} Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.493395 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49c5bc79-a738-4ac7-9efd-bc335a8ef954","Type":"ContainerStarted","Data":"800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc"} Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.498968 4842 generic.go:334] "Generic (PLEG): container finished" podID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerID="a4f66bb6691459a96784c0990772dce81b703d002db41ff5a7cd2e401b5fb2e1" exitCode=0 Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.499240 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerDied","Data":"a4f66bb6691459a96784c0990772dce81b703d002db41ff5a7cd2e401b5fb2e1"} Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.529371 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.563767 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-api-0" podStartSLOduration=2.563744569 podStartE2EDuration="2.563744569s" podCreationTimestamp="2025-11-11 14:00:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:29.511551143 +0000 UTC m=+1840.171840762" watchObservedRunningTime="2025-11-11 14:00:29.563744569 +0000 UTC m=+1840.224034208" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.689318 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-fhtn5"] Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.692475 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.696003 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.696034 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.703313 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-fhtn5"] Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.765252 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.765311 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.765477 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzwxd\" (UniqueName: \"kubernetes.io/projected/68060912-a0f0-46b2-8353-95d60b23450e-kube-api-access-gzwxd\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.765497 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-scripts\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.867040 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzwxd\" (UniqueName: \"kubernetes.io/projected/68060912-a0f0-46b2-8353-95d60b23450e-kube-api-access-gzwxd\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.867184 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-scripts\") pod 
\"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.867241 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.867276 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.874230 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-scripts\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.875962 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.877576 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.884880 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzwxd\" (UniqueName: \"kubernetes.io/projected/68060912-a0f0-46b2-8353-95d60b23450e-kube-api-access-gzwxd\") pod \"nova-cell1-cell-mapping-fhtn5\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:29 crc kubenswrapper[4842]: I1111 14:00:29.964388 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.021771 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071468 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-log-httpd\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071613 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-combined-ca-bundle\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071686 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-run-httpd\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071747 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-config-data\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071798 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-sg-core-conf-yaml\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071818 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-scripts\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071837 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-ceilometer-tls-certs\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.071854 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sns7m\" (UniqueName: \"kubernetes.io/projected/afca2368-3010-4711-b4d3-d0a65793f8d5-kube-api-access-sns7m\") pod \"afca2368-3010-4711-b4d3-d0a65793f8d5\" (UID: \"afca2368-3010-4711-b4d3-d0a65793f8d5\") " Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.075120 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.076202 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.077626 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afca2368-3010-4711-b4d3-d0a65793f8d5-kube-api-access-sns7m" (OuterVolumeSpecName: "kube-api-access-sns7m") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "kube-api-access-sns7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.080940 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-scripts" (OuterVolumeSpecName: "scripts") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.106671 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.147509 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.158434 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.173999 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.174039 4842 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.174051 4842 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.174063 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.174074 4842 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.174084 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sns7m\" (UniqueName: \"kubernetes.io/projected/afca2368-3010-4711-b4d3-d0a65793f8d5-kube-api-access-sns7m\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.174114 4842 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afca2368-3010-4711-b4d3-d0a65793f8d5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.253536 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-config-data" (OuterVolumeSpecName: "config-data") pod "afca2368-3010-4711-b4d3-d0a65793f8d5" (UID: "afca2368-3010-4711-b4d3-d0a65793f8d5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.275427 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afca2368-3010-4711-b4d3-d0a65793f8d5-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.478544 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-fhtn5"] Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.511493 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afca2368-3010-4711-b4d3-d0a65793f8d5","Type":"ContainerDied","Data":"4901da82848a4b1d11b888743d99d67696acb5b165ddfcc2fdd65225d77cdcb3"} Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.511509 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.511549 4842 scope.go:117] "RemoveContainer" containerID="1b14edb2f420ff0eb14e555d48a36e416dfe60cabe83ab5d32936a6b35fd0b8e" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.512922 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-fhtn5" event={"ID":"68060912-a0f0-46b2-8353-95d60b23450e","Type":"ContainerStarted","Data":"0688e0ee448661b1ed06d33741d2c433d4c41064890a0327dbfd53e5ca8bff81"} Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.630842 4842 scope.go:117] "RemoveContainer" containerID="65d601b8012f3b05a779f601cd1eb4ca7227dda474b73c950553c928fa1d6854" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.653888 4842 scope.go:117] "RemoveContainer" containerID="336fa7b76f8e201abcaa2d1d85684447b84b0eea27a3902796b14dec4f93684d" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.660492 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.679266 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.687533 4842 scope.go:117] "RemoveContainer" containerID="a4f66bb6691459a96784c0990772dce81b703d002db41ff5a7cd2e401b5fb2e1" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.698615 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:30 crc kubenswrapper[4842]: E1111 14:00:30.699061 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="sg-core" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699077 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="sg-core" Nov 11 14:00:30 crc kubenswrapper[4842]: E1111 14:00:30.699092 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="ceilometer-notification-agent" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699100 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="ceilometer-notification-agent" Nov 11 14:00:30 crc kubenswrapper[4842]: E1111 14:00:30.699124 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="proxy-httpd" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699131 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="proxy-httpd" Nov 11 14:00:30 crc kubenswrapper[4842]: E1111 14:00:30.699142 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="ceilometer-central-agent" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699148 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="ceilometer-central-agent" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699331 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="ceilometer-central-agent" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699349 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="proxy-httpd" Nov 11 
14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699362 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="ceilometer-notification-agent" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.699373 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" containerName="sg-core" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.701321 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.704135 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.704773 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.704993 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.724162 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.785526 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/534497bc-bc86-410d-88c9-ef65d8e2463c-run-httpd\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.785603 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-scripts\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.785621 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.785762 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/534497bc-bc86-410d-88c9-ef65d8e2463c-log-httpd\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.785940 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-config-data\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.785975 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.786094 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.786174 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6rvj\" (UniqueName: \"kubernetes.io/projected/534497bc-bc86-410d-88c9-ef65d8e2463c-kube-api-access-v6rvj\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.887395 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/534497bc-bc86-410d-88c9-ef65d8e2463c-run-httpd\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.887544 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-scripts\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.887572 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.888276 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/534497bc-bc86-410d-88c9-ef65d8e2463c-log-httpd\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.888316 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/534497bc-bc86-410d-88c9-ef65d8e2463c-run-httpd\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.888378 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-config-data\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.888399 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.888476 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.888537 4842 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6rvj\" (UniqueName: \"kubernetes.io/projected/534497bc-bc86-410d-88c9-ef65d8e2463c-kube-api-access-v6rvj\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.888672 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/534497bc-bc86-410d-88c9-ef65d8e2463c-log-httpd\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.893895 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.897866 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.899525 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.901356 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-config-data\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.906168 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/534497bc-bc86-410d-88c9-ef65d8e2463c-scripts\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:30 crc kubenswrapper[4842]: I1111 14:00:30.909836 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6rvj\" (UniqueName: \"kubernetes.io/projected/534497bc-bc86-410d-88c9-ef65d8e2463c-kube-api-access-v6rvj\") pod \"ceilometer-0\" (UID: \"534497bc-bc86-410d-88c9-ef65d8e2463c\") " pod="openstack/ceilometer-0" Nov 11 14:00:31 crc kubenswrapper[4842]: I1111 14:00:31.028191 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 11 14:00:31 crc kubenswrapper[4842]: I1111 14:00:31.479444 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 11 14:00:31 crc kubenswrapper[4842]: W1111 14:00:31.481664 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod534497bc_bc86_410d_88c9_ef65d8e2463c.slice/crio-1c7afc6229ad9c0a45547d4fbea70eadc19395f1fd7a490685d6040d6482eaab WatchSource:0}: Error finding container 1c7afc6229ad9c0a45547d4fbea70eadc19395f1fd7a490685d6040d6482eaab: Status 404 returned error can't find the container with id 1c7afc6229ad9c0a45547d4fbea70eadc19395f1fd7a490685d6040d6482eaab Nov 11 14:00:31 crc kubenswrapper[4842]: I1111 14:00:31.524779 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-fhtn5" event={"ID":"68060912-a0f0-46b2-8353-95d60b23450e","Type":"ContainerStarted","Data":"c471fe3400187582065aafaefdcc71a45198c8672f951425f26af8fe907d08dc"} Nov 11 14:00:31 crc kubenswrapper[4842]: I1111 14:00:31.525840 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"534497bc-bc86-410d-88c9-ef65d8e2463c","Type":"ContainerStarted","Data":"1c7afc6229ad9c0a45547d4fbea70eadc19395f1fd7a490685d6040d6482eaab"} Nov 11 14:00:31 crc kubenswrapper[4842]: I1111 14:00:31.543812 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-fhtn5" podStartSLOduration=2.5437972110000002 podStartE2EDuration="2.543797211s" podCreationTimestamp="2025-11-11 14:00:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:31.540781146 +0000 UTC m=+1842.201070785" watchObservedRunningTime="2025-11-11 14:00:31.543797211 +0000 UTC m=+1842.204086830" Nov 11 14:00:31 crc kubenswrapper[4842]: I1111 14:00:31.906202 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.024268 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-754c5fdd47-xdgzz"] Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.024497 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" podUID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerName="dnsmasq-dns" containerID="cri-o://41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec" gracePeriod=10 Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.074326 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afca2368-3010-4711-b4d3-d0a65793f8d5" path="/var/lib/kubelet/pods/afca2368-3010-4711-b4d3-d0a65793f8d5/volumes" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.529143 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.543485 4842 generic.go:334] "Generic (PLEG): container finished" podID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerID="41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec" exitCode=0 Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.543556 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.543595 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" event={"ID":"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9","Type":"ContainerDied","Data":"41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec"} Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.543661 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-754c5fdd47-xdgzz" event={"ID":"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9","Type":"ContainerDied","Data":"67c5e17e4f1ba4633efa3e4de984e7104fce2e38b917004e34feb6d6119886a0"} Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.543684 4842 scope.go:117] "RemoveContainer" containerID="41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.552112 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"534497bc-bc86-410d-88c9-ef65d8e2463c","Type":"ContainerStarted","Data":"1a67a3128a690ac4c9ba5eb74083bf1cddc6ca66e1b61a6368c4bb153fa1733a"} Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.552184 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"534497bc-bc86-410d-88c9-ef65d8e2463c","Type":"ContainerStarted","Data":"133a2c56ff14db72b03f13b71a3c99f9fd1654215bcfe6ea9beae9040488b52b"} Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.641861 4842 scope.go:117] "RemoveContainer" containerID="00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.683599 4842 scope.go:117] "RemoveContainer" containerID="41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec" Nov 11 14:00:32 crc kubenswrapper[4842]: E1111 14:00:32.684047 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec\": container with ID starting with 41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec not found: ID does not exist" containerID="41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.684177 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec"} err="failed to get container status \"41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec\": rpc error: code = NotFound desc = could not find container \"41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec\": container with ID starting with 41b19fec607f73e535ff5e824d536b806d777007e9bdbc4a156bb204a2971eec not found: ID does not exist" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.684278 4842 scope.go:117] "RemoveContainer" containerID="00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473" Nov 11 14:00:32 crc kubenswrapper[4842]: E1111 14:00:32.684840 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473\": container with ID starting with 00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473 not found: ID does not exist" containerID="00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473" Nov 11 14:00:32 crc 
kubenswrapper[4842]: I1111 14:00:32.684869 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473"} err="failed to get container status \"00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473\": rpc error: code = NotFound desc = could not find container \"00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473\": container with ID starting with 00d6da56d192b90af4b87e10c091f2b66f1ada2a58fabd71edb391d0c49da473 not found: ID does not exist" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.729974 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-config\") pod \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.730271 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rl5nq\" (UniqueName: \"kubernetes.io/projected/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-kube-api-access-rl5nq\") pod \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.730376 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-svc\") pod \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.730457 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-swift-storage-0\") pod \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.730510 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-sb\") pod \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.730540 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-nb\") pod \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\" (UID: \"8578a0f1-05c9-4f7b-8364-d529ad3aa5b9\") " Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.738748 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-kube-api-access-rl5nq" (OuterVolumeSpecName: "kube-api-access-rl5nq") pod "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" (UID: "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9"). InnerVolumeSpecName "kube-api-access-rl5nq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.797497 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" (UID: "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.799556 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" (UID: "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.803302 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-config" (OuterVolumeSpecName: "config") pod "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" (UID: "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.818873 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" (UID: "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.819693 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" (UID: "8578a0f1-05c9-4f7b-8364-d529ad3aa5b9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.833822 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-config\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.833872 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rl5nq\" (UniqueName: \"kubernetes.io/projected/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-kube-api-access-rl5nq\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.833889 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.833901 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.833914 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.833926 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.887213 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-754c5fdd47-xdgzz"] Nov 11 14:00:32 crc kubenswrapper[4842]: I1111 14:00:32.897334 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-754c5fdd47-xdgzz"] Nov 11 14:00:33 crc kubenswrapper[4842]: I1111 14:00:33.566330 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"534497bc-bc86-410d-88c9-ef65d8e2463c","Type":"ContainerStarted","Data":"14ba613566ebdce8a1ce16b53950f26ad22f7582606242feaed6127ef2b0460d"} Nov 11 14:00:34 crc kubenswrapper[4842]: I1111 14:00:34.070635 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" path="/var/lib/kubelet/pods/8578a0f1-05c9-4f7b-8364-d529ad3aa5b9/volumes" Nov 11 14:00:35 crc kubenswrapper[4842]: I1111 14:00:35.586372 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"534497bc-bc86-410d-88c9-ef65d8e2463c","Type":"ContainerStarted","Data":"87bdcd034de9cd88a339cb920b83cdbbe195b7393b50ae0d45d3d476e08e5d6e"} Nov 11 14:00:35 crc kubenswrapper[4842]: I1111 14:00:35.587833 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 11 14:00:35 crc kubenswrapper[4842]: I1111 14:00:35.617151 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.7092550749999997 podStartE2EDuration="5.617135768s" podCreationTimestamp="2025-11-11 14:00:30 +0000 UTC" firstStartedPulling="2025-11-11 14:00:31.484038783 +0000 UTC m=+1842.144328402" lastFinishedPulling="2025-11-11 14:00:34.391919476 +0000 UTC m=+1845.052209095" observedRunningTime="2025-11-11 14:00:35.611735706 +0000 UTC m=+1846.272025345" watchObservedRunningTime="2025-11-11 14:00:35.617135768 +0000 UTC m=+1846.277425387" Nov 11 14:00:36 crc kubenswrapper[4842]: I1111 14:00:36.600633 4842 generic.go:334] "Generic (PLEG): container finished" podID="68060912-a0f0-46b2-8353-95d60b23450e" containerID="c471fe3400187582065aafaefdcc71a45198c8672f951425f26af8fe907d08dc" exitCode=0 Nov 11 14:00:36 crc kubenswrapper[4842]: I1111 14:00:36.602927 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-fhtn5" event={"ID":"68060912-a0f0-46b2-8353-95d60b23450e","Type":"ContainerDied","Data":"c471fe3400187582065aafaefdcc71a45198c8672f951425f26af8fe907d08dc"} Nov 11 14:00:37 crc kubenswrapper[4842]: I1111 14:00:37.851431 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 14:00:37 crc kubenswrapper[4842]: I1111 14:00:37.851691 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.010054 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.128625 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-combined-ca-bundle\") pod \"68060912-a0f0-46b2-8353-95d60b23450e\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.128673 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data\") pod \"68060912-a0f0-46b2-8353-95d60b23450e\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.128689 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-scripts\") pod \"68060912-a0f0-46b2-8353-95d60b23450e\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.128832 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzwxd\" (UniqueName: \"kubernetes.io/projected/68060912-a0f0-46b2-8353-95d60b23450e-kube-api-access-gzwxd\") pod \"68060912-a0f0-46b2-8353-95d60b23450e\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.135072 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68060912-a0f0-46b2-8353-95d60b23450e-kube-api-access-gzwxd" (OuterVolumeSpecName: "kube-api-access-gzwxd") pod "68060912-a0f0-46b2-8353-95d60b23450e" (UID: "68060912-a0f0-46b2-8353-95d60b23450e"). InnerVolumeSpecName "kube-api-access-gzwxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.135709 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-scripts" (OuterVolumeSpecName: "scripts") pod "68060912-a0f0-46b2-8353-95d60b23450e" (UID: "68060912-a0f0-46b2-8353-95d60b23450e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:38 crc kubenswrapper[4842]: E1111 14:00:38.164633 4842 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data podName:68060912-a0f0-46b2-8353-95d60b23450e nodeName:}" failed. No retries permitted until 2025-11-11 14:00:38.664598576 +0000 UTC m=+1849.324888195 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data") pod "68060912-a0f0-46b2-8353-95d60b23450e" (UID: "68060912-a0f0-46b2-8353-95d60b23450e") : error deleting /var/lib/kubelet/pods/68060912-a0f0-46b2-8353-95d60b23450e/volume-subpaths: remove /var/lib/kubelet/pods/68060912-a0f0-46b2-8353-95d60b23450e/volume-subpaths: no such file or directory Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.167872 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "68060912-a0f0-46b2-8353-95d60b23450e" (UID: "68060912-a0f0-46b2-8353-95d60b23450e"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.230804 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.231030 4842 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-scripts\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.231039 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzwxd\" (UniqueName: \"kubernetes.io/projected/68060912-a0f0-46b2-8353-95d60b23450e-kube-api-access-gzwxd\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.624413 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-fhtn5" event={"ID":"68060912-a0f0-46b2-8353-95d60b23450e","Type":"ContainerDied","Data":"0688e0ee448661b1ed06d33741d2c433d4c41064890a0327dbfd53e5ca8bff81"} Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.624798 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0688e0ee448661b1ed06d33741d2c433d4c41064890a0327dbfd53e5ca8bff81" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.624441 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-fhtn5" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.741946 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data\") pod \"68060912-a0f0-46b2-8353-95d60b23450e\" (UID: \"68060912-a0f0-46b2-8353-95d60b23450e\") " Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.759310 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data" (OuterVolumeSpecName: "config-data") pod "68060912-a0f0-46b2-8353-95d60b23450e" (UID: "68060912-a0f0-46b2-8353-95d60b23450e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.805330 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.805622 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-log" containerID="cri-o://800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc" gracePeriod=30 Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.805697 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-api" containerID="cri-o://b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66" gracePeriod=30 Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.813387 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.220:8774/\": EOF" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.813582 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.220:8774/\": EOF" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.816533 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.816740 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="4208c47d-d376-4eb3-9751-b19e6b672359" containerName="nova-scheduler-scheduler" containerID="cri-o://3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5" gracePeriod=30 Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.844850 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68060912-a0f0-46b2-8353-95d60b23450e-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.888329 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.888663 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-log" containerID="cri-o://d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524" gracePeriod=30 Nov 11 14:00:38 crc kubenswrapper[4842]: I1111 14:00:38.888975 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-metadata" containerID="cri-o://5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803" gracePeriod=30 Nov 11 14:00:39 crc kubenswrapper[4842]: I1111 14:00:39.646133 4842 generic.go:334] "Generic (PLEG): container finished" podID="319b2767-fb86-4b6b-873e-492634aa5465" containerID="d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524" exitCode=143 Nov 11 14:00:39 crc kubenswrapper[4842]: I1111 14:00:39.646219 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"319b2767-fb86-4b6b-873e-492634aa5465","Type":"ContainerDied","Data":"d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524"} Nov 11 14:00:39 crc kubenswrapper[4842]: I1111 14:00:39.648281 4842 generic.go:334] "Generic (PLEG): container finished" podID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerID="800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc" exitCode=143 Nov 11 14:00:39 crc kubenswrapper[4842]: I1111 14:00:39.648315 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49c5bc79-a738-4ac7-9efd-bc335a8ef954","Type":"ContainerDied","Data":"800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc"} Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.190411 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.289946 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c2vt\" (UniqueName: \"kubernetes.io/projected/4208c47d-d376-4eb3-9751-b19e6b672359-kube-api-access-2c2vt\") pod \"4208c47d-d376-4eb3-9751-b19e6b672359\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.290011 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-combined-ca-bundle\") pod \"4208c47d-d376-4eb3-9751-b19e6b672359\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.290070 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-config-data\") pod \"4208c47d-d376-4eb3-9751-b19e6b672359\" (UID: \"4208c47d-d376-4eb3-9751-b19e6b672359\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.308376 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4208c47d-d376-4eb3-9751-b19e6b672359-kube-api-access-2c2vt" (OuterVolumeSpecName: "kube-api-access-2c2vt") pod "4208c47d-d376-4eb3-9751-b19e6b672359" (UID: "4208c47d-d376-4eb3-9751-b19e6b672359"). InnerVolumeSpecName "kube-api-access-2c2vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.323919 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-config-data" (OuterVolumeSpecName: "config-data") pod "4208c47d-d376-4eb3-9751-b19e6b672359" (UID: "4208c47d-d376-4eb3-9751-b19e6b672359"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.332286 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4208c47d-d376-4eb3-9751-b19e6b672359" (UID: "4208c47d-d376-4eb3-9751-b19e6b672359"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.392284 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.392314 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c2vt\" (UniqueName: \"kubernetes.io/projected/4208c47d-d376-4eb3-9751-b19e6b672359-kube-api-access-2c2vt\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.392323 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4208c47d-d376-4eb3-9751-b19e6b672359-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.460576 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.595362 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-combined-ca-bundle\") pod \"319b2767-fb86-4b6b-873e-492634aa5465\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.595480 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2z8k\" (UniqueName: \"kubernetes.io/projected/319b2767-fb86-4b6b-873e-492634aa5465-kube-api-access-c2z8k\") pod \"319b2767-fb86-4b6b-873e-492634aa5465\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.595665 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/319b2767-fb86-4b6b-873e-492634aa5465-logs\") pod \"319b2767-fb86-4b6b-873e-492634aa5465\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.595697 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-nova-metadata-tls-certs\") pod \"319b2767-fb86-4b6b-873e-492634aa5465\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.595746 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-config-data\") pod \"319b2767-fb86-4b6b-873e-492634aa5465\" (UID: \"319b2767-fb86-4b6b-873e-492634aa5465\") " Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.596065 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/319b2767-fb86-4b6b-873e-492634aa5465-logs" (OuterVolumeSpecName: "logs") pod "319b2767-fb86-4b6b-873e-492634aa5465" (UID: "319b2767-fb86-4b6b-873e-492634aa5465"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.596317 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/319b2767-fb86-4b6b-873e-492634aa5465-logs\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.599260 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/319b2767-fb86-4b6b-873e-492634aa5465-kube-api-access-c2z8k" (OuterVolumeSpecName: "kube-api-access-c2z8k") pod "319b2767-fb86-4b6b-873e-492634aa5465" (UID: "319b2767-fb86-4b6b-873e-492634aa5465"). InnerVolumeSpecName "kube-api-access-c2z8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.634272 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "319b2767-fb86-4b6b-873e-492634aa5465" (UID: "319b2767-fb86-4b6b-873e-492634aa5465"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.643782 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-config-data" (OuterVolumeSpecName: "config-data") pod "319b2767-fb86-4b6b-873e-492634aa5465" (UID: "319b2767-fb86-4b6b-873e-492634aa5465"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.661276 4842 generic.go:334] "Generic (PLEG): container finished" podID="319b2767-fb86-4b6b-873e-492634aa5465" containerID="5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803" exitCode=0 Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.661367 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"319b2767-fb86-4b6b-873e-492634aa5465","Type":"ContainerDied","Data":"5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803"} Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.661398 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"319b2767-fb86-4b6b-873e-492634aa5465","Type":"ContainerDied","Data":"bedcc7c849010dc4880c218deda54d83e97f070fcbb6bd8c0f9deb7c08dd9fa9"} Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.661416 4842 scope.go:117] "RemoveContainer" containerID="5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.661636 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.663552 4842 generic.go:334] "Generic (PLEG): container finished" podID="4208c47d-d376-4eb3-9751-b19e6b672359" containerID="3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5" exitCode=0 Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.663580 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4208c47d-d376-4eb3-9751-b19e6b672359","Type":"ContainerDied","Data":"3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5"} Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.663598 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4208c47d-d376-4eb3-9751-b19e6b672359","Type":"ContainerDied","Data":"b10255fa046d88d36829cbcb6548ccca9469c73289e6626a5950a32984a1c44c"} Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.663648 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.665428 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "319b2767-fb86-4b6b-873e-492634aa5465" (UID: "319b2767-fb86-4b6b-873e-492634aa5465"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.697131 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.698073 4842 scope.go:117] "RemoveContainer" containerID="d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.698545 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2z8k\" (UniqueName: \"kubernetes.io/projected/319b2767-fb86-4b6b-873e-492634aa5465-kube-api-access-c2z8k\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.698575 4842 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.698589 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.698599 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b2767-fb86-4b6b-873e-492634aa5465-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.716564 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.727354 4842 scope.go:117] "RemoveContainer" containerID="5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.728085 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.728663 4842 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="4208c47d-d376-4eb3-9751-b19e6b672359" containerName="nova-scheduler-scheduler" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.728689 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="4208c47d-d376-4eb3-9751-b19e6b672359" containerName="nova-scheduler-scheduler" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.728881 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-metadata" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.728895 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-metadata" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.728912 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerName="dnsmasq-dns" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.728919 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerName="dnsmasq-dns" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.728943 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68060912-a0f0-46b2-8353-95d60b23450e" containerName="nova-manage" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.728950 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="68060912-a0f0-46b2-8353-95d60b23450e" containerName="nova-manage" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.728966 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerName="init" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.728974 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerName="init" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.729001 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-log" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.729008 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-log" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.729260 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803\": container with ID starting with 5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803 not found: ID does not exist" containerID="5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.729320 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803"} err="failed to get container status \"5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803\": rpc error: code = NotFound desc = could not find container \"5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803\": container with ID starting with 5e562959e6c5d96777060a5392a36bf92e981c60447494e1627e8457b7313803 not found: ID does not exist" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.729348 4842 scope.go:117] "RemoveContainer" containerID="d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524" Nov 11 14:00:40 crc 
kubenswrapper[4842]: I1111 14:00:40.729275 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-log" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.729392 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="4208c47d-d376-4eb3-9751-b19e6b672359" containerName="nova-scheduler-scheduler" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.729411 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8578a0f1-05c9-4f7b-8364-d529ad3aa5b9" containerName="dnsmasq-dns" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.729423 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="319b2767-fb86-4b6b-873e-492634aa5465" containerName="nova-metadata-metadata" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.729437 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="68060912-a0f0-46b2-8353-95d60b23450e" containerName="nova-manage" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.730409 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.731323 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524\": container with ID starting with d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524 not found: ID does not exist" containerID="d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.731365 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524"} err="failed to get container status \"d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524\": rpc error: code = NotFound desc = could not find container \"d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524\": container with ID starting with d6822d763167575ea36aff5a83d7ef7342553864cb2e233c8eb2262cb1583524 not found: ID does not exist" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.731394 4842 scope.go:117] "RemoveContainer" containerID="3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.733000 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.746350 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.810284 4842 scope.go:117] "RemoveContainer" containerID="3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5" Nov 11 14:00:40 crc kubenswrapper[4842]: E1111 14:00:40.810815 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5\": container with ID starting with 3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5 not found: ID does not exist" containerID="3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.810858 4842 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5"} err="failed to get container status \"3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5\": rpc error: code = NotFound desc = could not find container \"3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5\": container with ID starting with 3e85352dca922abdf3a57aeb108030140597e2d81d6af588419ef3c4db0c65a5 not found: ID does not exist" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.905843 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c57355b5-f8b3-463a-933b-fedf7d2886a6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.906120 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c57355b5-f8b3-463a-933b-fedf7d2886a6-config-data\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:40 crc kubenswrapper[4842]: I1111 14:00:40.906146 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dpfs\" (UniqueName: \"kubernetes.io/projected/c57355b5-f8b3-463a-933b-fedf7d2886a6-kube-api-access-7dpfs\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.007731 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c57355b5-f8b3-463a-933b-fedf7d2886a6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.007835 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c57355b5-f8b3-463a-933b-fedf7d2886a6-config-data\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.007857 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dpfs\" (UniqueName: \"kubernetes.io/projected/c57355b5-f8b3-463a-933b-fedf7d2886a6-kube-api-access-7dpfs\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.012231 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c57355b5-f8b3-463a-933b-fedf7d2886a6-config-data\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.013242 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c57355b5-f8b3-463a-933b-fedf7d2886a6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.023347 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7dpfs\" (UniqueName: \"kubernetes.io/projected/c57355b5-f8b3-463a-933b-fedf7d2886a6-kube-api-access-7dpfs\") pod \"nova-scheduler-0\" (UID: \"c57355b5-f8b3-463a-933b-fedf7d2886a6\") " pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.068084 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.180346 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.198693 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.203328 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.207471 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.212445 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.212671 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.221436 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.312529 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d993b82e-e35e-44d7-9e76-1bae92e42c25-logs\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.312609 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.312649 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-config-data\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.312711 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.312740 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnfl9\" (UniqueName: \"kubernetes.io/projected/d993b82e-e35e-44d7-9e76-1bae92e42c25-kube-api-access-bnfl9\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.414215 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.414259 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnfl9\" (UniqueName: \"kubernetes.io/projected/d993b82e-e35e-44d7-9e76-1bae92e42c25-kube-api-access-bnfl9\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.414416 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d993b82e-e35e-44d7-9e76-1bae92e42c25-logs\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.414463 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.414518 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-config-data\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.414822 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d993b82e-e35e-44d7-9e76-1bae92e42c25-logs\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.419198 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.419245 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.429715 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d993b82e-e35e-44d7-9e76-1bae92e42c25-config-data\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.430642 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnfl9\" (UniqueName: \"kubernetes.io/projected/d993b82e-e35e-44d7-9e76-1bae92e42c25-kube-api-access-bnfl9\") pod \"nova-metadata-0\" (UID: \"d993b82e-e35e-44d7-9e76-1bae92e42c25\") " pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 
14:00:41.555609 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.557601 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 11 14:00:41 crc kubenswrapper[4842]: I1111 14:00:41.687185 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c57355b5-f8b3-463a-933b-fedf7d2886a6","Type":"ContainerStarted","Data":"fa6743c369f1aec1bc534d7c52201825b6ac0480a67652badcfa9f286c54e32e"} Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.025297 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 11 14:00:42 crc kubenswrapper[4842]: W1111 14:00:42.030486 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd993b82e_e35e_44d7_9e76_1bae92e42c25.slice/crio-3f366194324288fe3d8222d32c4ac1de20f6785fac33844713fcd737bcb7df4d WatchSource:0}: Error finding container 3f366194324288fe3d8222d32c4ac1de20f6785fac33844713fcd737bcb7df4d: Status 404 returned error can't find the container with id 3f366194324288fe3d8222d32c4ac1de20f6785fac33844713fcd737bcb7df4d Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.079045 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="319b2767-fb86-4b6b-873e-492634aa5465" path="/var/lib/kubelet/pods/319b2767-fb86-4b6b-873e-492634aa5465/volumes" Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.080081 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4208c47d-d376-4eb3-9751-b19e6b672359" path="/var/lib/kubelet/pods/4208c47d-d376-4eb3-9751-b19e6b672359/volumes" Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.704893 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d993b82e-e35e-44d7-9e76-1bae92e42c25","Type":"ContainerStarted","Data":"28b2f2185cdd7812e338910f46d290b8968d08780c40f222412e15e466c3e91a"} Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.705260 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d993b82e-e35e-44d7-9e76-1bae92e42c25","Type":"ContainerStarted","Data":"3ee73e2126d6a5947d36d26059d5566ea1065dbdc8f09f2c5c89f4364d3eb372"} Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.705273 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d993b82e-e35e-44d7-9e76-1bae92e42c25","Type":"ContainerStarted","Data":"3f366194324288fe3d8222d32c4ac1de20f6785fac33844713fcd737bcb7df4d"} Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.709317 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c57355b5-f8b3-463a-933b-fedf7d2886a6","Type":"ContainerStarted","Data":"bf1f3339c725603a86f0f31412c1a52f9d262e3c5b9e0be0636afda5adaceae2"} Nov 11 14:00:42 crc kubenswrapper[4842]: I1111 14:00:42.722934 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.722915897 podStartE2EDuration="1.722915897s" podCreationTimestamp="2025-11-11 14:00:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:42.720939752 +0000 UTC m=+1853.381229371" watchObservedRunningTime="2025-11-11 14:00:42.722915897 +0000 UTC m=+1853.383205516" Nov 11 14:00:42 
crc kubenswrapper[4842]: I1111 14:00:42.742949 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.742931643 podStartE2EDuration="2.742931643s" podCreationTimestamp="2025-11-11 14:00:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:42.736887132 +0000 UTC m=+1853.397176751" watchObservedRunningTime="2025-11-11 14:00:42.742931643 +0000 UTC m=+1853.403221262" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.059696 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:00:43 crc kubenswrapper[4842]: E1111 14:00:43.059976 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.443161 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.557084 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-public-tls-certs\") pod \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.557176 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztql6\" (UniqueName: \"kubernetes.io/projected/49c5bc79-a738-4ac7-9efd-bc335a8ef954-kube-api-access-ztql6\") pod \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.557306 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-config-data\") pod \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.557343 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-combined-ca-bundle\") pod \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.557385 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c5bc79-a738-4ac7-9efd-bc335a8ef954-logs\") pod \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.557456 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-internal-tls-certs\") pod \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\" (UID: \"49c5bc79-a738-4ac7-9efd-bc335a8ef954\") " Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 
14:00:43.558639 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49c5bc79-a738-4ac7-9efd-bc335a8ef954-logs" (OuterVolumeSpecName: "logs") pod "49c5bc79-a738-4ac7-9efd-bc335a8ef954" (UID: "49c5bc79-a738-4ac7-9efd-bc335a8ef954"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.567860 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c5bc79-a738-4ac7-9efd-bc335a8ef954-kube-api-access-ztql6" (OuterVolumeSpecName: "kube-api-access-ztql6") pod "49c5bc79-a738-4ac7-9efd-bc335a8ef954" (UID: "49c5bc79-a738-4ac7-9efd-bc335a8ef954"). InnerVolumeSpecName "kube-api-access-ztql6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.594386 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-config-data" (OuterVolumeSpecName: "config-data") pod "49c5bc79-a738-4ac7-9efd-bc335a8ef954" (UID: "49c5bc79-a738-4ac7-9efd-bc335a8ef954"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.601618 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49c5bc79-a738-4ac7-9efd-bc335a8ef954" (UID: "49c5bc79-a738-4ac7-9efd-bc335a8ef954"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.614018 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "49c5bc79-a738-4ac7-9efd-bc335a8ef954" (UID: "49c5bc79-a738-4ac7-9efd-bc335a8ef954"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.623771 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "49c5bc79-a738-4ac7-9efd-bc335a8ef954" (UID: "49c5bc79-a738-4ac7-9efd-bc335a8ef954"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.659442 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.659510 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.659527 4842 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c5bc79-a738-4ac7-9efd-bc335a8ef954-logs\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.659540 4842 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.659548 4842 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c5bc79-a738-4ac7-9efd-bc335a8ef954-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.659556 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztql6\" (UniqueName: \"kubernetes.io/projected/49c5bc79-a738-4ac7-9efd-bc335a8ef954-kube-api-access-ztql6\") on node \"crc\" DevicePath \"\"" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.733401 4842 generic.go:334] "Generic (PLEG): container finished" podID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerID="b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66" exitCode=0 Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.733470 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.733519 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49c5bc79-a738-4ac7-9efd-bc335a8ef954","Type":"ContainerDied","Data":"b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66"} Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.733558 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"49c5bc79-a738-4ac7-9efd-bc335a8ef954","Type":"ContainerDied","Data":"ed057205b013c46bd5ebd1dc7b79d1ae49624d9802ac52680823033990939c03"} Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.733575 4842 scope.go:117] "RemoveContainer" containerID="b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.756204 4842 scope.go:117] "RemoveContainer" containerID="800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.773296 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.783063 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.793145 4842 scope.go:117] "RemoveContainer" containerID="b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66" Nov 11 14:00:43 crc kubenswrapper[4842]: E1111 14:00:43.793790 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66\": container with ID starting with b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66 not found: ID does not exist" containerID="b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.793823 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66"} err="failed to get container status \"b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66\": rpc error: code = NotFound desc = could not find container \"b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66\": container with ID starting with b61ae8c8ad8abee3debd41cfb86f80e395eff44195958b9f70025202aa60dc66 not found: ID does not exist" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.793851 4842 scope.go:117] "RemoveContainer" containerID="800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc" Nov 11 14:00:43 crc kubenswrapper[4842]: E1111 14:00:43.794372 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc\": container with ID starting with 800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc not found: ID does not exist" containerID="800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.794416 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc"} err="failed to get container status \"800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc\": rpc error: code = NotFound desc = could not 
find container \"800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc\": container with ID starting with 800a61fea4c2644ecba41a24e70662f59b9b611b12c7c6a171d260598c57fffc not found: ID does not exist" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.815129 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:43 crc kubenswrapper[4842]: E1111 14:00:43.815675 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-log" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.815703 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-log" Nov 11 14:00:43 crc kubenswrapper[4842]: E1111 14:00:43.815751 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-api" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.815759 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-api" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.815978 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-api" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.816006 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" containerName="nova-api-log" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.817521 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.820401 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.820685 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.820885 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.822819 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.966875 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-internal-tls-certs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.966969 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23d198b8-bfde-44cc-909f-593c5a1968a3-logs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.966993 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-public-tls-certs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.967012 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.967030 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrfrh\" (UniqueName: \"kubernetes.io/projected/23d198b8-bfde-44cc-909f-593c5a1968a3-kube-api-access-hrfrh\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:43 crc kubenswrapper[4842]: I1111 14:00:43.967289 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-config-data\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.070251 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23d198b8-bfde-44cc-909f-593c5a1968a3-logs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.070722 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-public-tls-certs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.070636 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23d198b8-bfde-44cc-909f-593c5a1968a3-logs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.070787 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.070874 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrfrh\" (UniqueName: \"kubernetes.io/projected/23d198b8-bfde-44cc-909f-593c5a1968a3-kube-api-access-hrfrh\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.071843 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-config-data\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.072164 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c5bc79-a738-4ac7-9efd-bc335a8ef954" path="/var/lib/kubelet/pods/49c5bc79-a738-4ac7-9efd-bc335a8ef954/volumes" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.072810 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-internal-tls-certs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.075971 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-public-tls-certs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.076354 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-config-data\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.076905 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-internal-tls-certs\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.077184 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23d198b8-bfde-44cc-909f-593c5a1968a3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.089555 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrfrh\" (UniqueName: \"kubernetes.io/projected/23d198b8-bfde-44cc-909f-593c5a1968a3-kube-api-access-hrfrh\") pod \"nova-api-0\" (UID: \"23d198b8-bfde-44cc-909f-593c5a1968a3\") " pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.147581 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.592890 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.746715 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"23d198b8-bfde-44cc-909f-593c5a1968a3","Type":"ContainerStarted","Data":"a25c4e44bc1381920b58f650ca37b15b3750ff0f17b3d557a0be2757cde6c53f"} Nov 11 14:00:44 crc kubenswrapper[4842]: I1111 14:00:44.746793 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"23d198b8-bfde-44cc-909f-593c5a1968a3","Type":"ContainerStarted","Data":"3e1446289aca2ac1bd3276c50ca0ddf2345ef2e3e6a8512d7583cc77bd22746e"} Nov 11 14:00:45 crc kubenswrapper[4842]: I1111 14:00:45.762147 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"23d198b8-bfde-44cc-909f-593c5a1968a3","Type":"ContainerStarted","Data":"2f9727dbc7245cc86e8eeaf25499d7a56326bccc2da1398dd0c4c753cb0b9e8b"} Nov 11 14:00:45 crc kubenswrapper[4842]: I1111 14:00:45.787215 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.787160949 podStartE2EDuration="2.787160949s" podCreationTimestamp="2025-11-11 14:00:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:00:45.785001787 +0000 UTC m=+1856.445291426" watchObservedRunningTime="2025-11-11 14:00:45.787160949 +0000 UTC m=+1856.447450568" Nov 11 14:00:46 crc kubenswrapper[4842]: I1111 14:00:46.070932 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 11 14:00:46 crc kubenswrapper[4842]: I1111 14:00:46.558249 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 11 14:00:46 crc kubenswrapper[4842]: I1111 14:00:46.558340 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 11 14:00:51 crc kubenswrapper[4842]: I1111 14:00:51.068464 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 11 14:00:51 crc kubenswrapper[4842]: I1111 14:00:51.108844 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 11 14:00:51 crc kubenswrapper[4842]: I1111 14:00:51.557869 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 11 14:00:51 crc kubenswrapper[4842]: I1111 14:00:51.557921 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 11 14:00:51 crc kubenswrapper[4842]: I1111 14:00:51.846168 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 11 14:00:52 crc kubenswrapper[4842]: I1111 14:00:52.575271 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d993b82e-e35e-44d7-9e76-1bae92e42c25" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.224:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:52 crc kubenswrapper[4842]: I1111 14:00:52.575291 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="d993b82e-e35e-44d7-9e76-1bae92e42c25" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.224:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:54 crc kubenswrapper[4842]: I1111 14:00:54.148477 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 14:00:54 crc kubenswrapper[4842]: I1111 14:00:54.148785 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 11 14:00:55 crc kubenswrapper[4842]: I1111 14:00:55.163297 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="23d198b8-bfde-44cc-909f-593c5a1968a3" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.225:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:55 crc kubenswrapper[4842]: I1111 14:00:55.163297 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="23d198b8-bfde-44cc-909f-593c5a1968a3" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.225:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 11 14:00:56 crc kubenswrapper[4842]: I1111 14:00:56.058905 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:00:56 crc kubenswrapper[4842]: E1111 14:00:56.059524 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.148403 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29381161-b8tsl"] Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.150289 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.156284 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29381161-b8tsl"] Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.294500 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-config-data\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.294771 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-fernet-keys\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.295003 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jdqp\" (UniqueName: \"kubernetes.io/projected/4401a195-a0c5-46b6-9b52-8e83c88ef55d-kube-api-access-4jdqp\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.295090 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-combined-ca-bundle\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.396932 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-config-data\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.397011 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-fernet-keys\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.397085 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jdqp\" (UniqueName: \"kubernetes.io/projected/4401a195-a0c5-46b6-9b52-8e83c88ef55d-kube-api-access-4jdqp\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.397133 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-combined-ca-bundle\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.402984 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-config-data\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.403632 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-fernet-keys\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.403917 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-combined-ca-bundle\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.419092 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jdqp\" (UniqueName: \"kubernetes.io/projected/4401a195-a0c5-46b6-9b52-8e83c88ef55d-kube-api-access-4jdqp\") pod \"keystone-cron-29381161-b8tsl\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:00 crc kubenswrapper[4842]: I1111 14:01:00.510356 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:01 crc kubenswrapper[4842]: W1111 14:01:01.006014 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4401a195_a0c5_46b6_9b52_8e83c88ef55d.slice/crio-5ae2397919a30c40c6b38aa2729b46c8ea409eaf298cbf461a6a389e2b415c39 WatchSource:0}: Error finding container 5ae2397919a30c40c6b38aa2729b46c8ea409eaf298cbf461a6a389e2b415c39: Status 404 returned error can't find the container with id 5ae2397919a30c40c6b38aa2729b46c8ea409eaf298cbf461a6a389e2b415c39 Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.008148 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29381161-b8tsl"] Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.120543 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.562392 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.566248 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.571204 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.914538 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381161-b8tsl" event={"ID":"4401a195-a0c5-46b6-9b52-8e83c88ef55d","Type":"ContainerStarted","Data":"d3e1293b69720c5eb9b90da0950fb9441cbf964fe7fad8a85525bb4ce153865b"} Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.915157 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381161-b8tsl" 
event={"ID":"4401a195-a0c5-46b6-9b52-8e83c88ef55d","Type":"ContainerStarted","Data":"5ae2397919a30c40c6b38aa2729b46c8ea409eaf298cbf461a6a389e2b415c39"} Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.919843 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 11 14:01:01 crc kubenswrapper[4842]: I1111 14:01:01.940533 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29381161-b8tsl" podStartSLOduration=1.94051523 podStartE2EDuration="1.94051523s" podCreationTimestamp="2025-11-11 14:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:01:01.933355187 +0000 UTC m=+1872.593644826" watchObservedRunningTime="2025-11-11 14:01:01.94051523 +0000 UTC m=+1872.600804849" Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.156157 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.158567 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.160254 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.182983 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.947272 4842 generic.go:334] "Generic (PLEG): container finished" podID="4401a195-a0c5-46b6-9b52-8e83c88ef55d" containerID="d3e1293b69720c5eb9b90da0950fb9441cbf964fe7fad8a85525bb4ce153865b" exitCode=0 Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.947350 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381161-b8tsl" event={"ID":"4401a195-a0c5-46b6-9b52-8e83c88ef55d","Type":"ContainerDied","Data":"d3e1293b69720c5eb9b90da0950fb9441cbf964fe7fad8a85525bb4ce153865b"} Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.947555 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 11 14:01:04 crc kubenswrapper[4842]: I1111 14:01:04.958129 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.311510 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.417828 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-combined-ca-bundle\") pod \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.418452 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-fernet-keys\") pod \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.418594 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jdqp\" (UniqueName: \"kubernetes.io/projected/4401a195-a0c5-46b6-9b52-8e83c88ef55d-kube-api-access-4jdqp\") pod \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.418733 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-config-data\") pod \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\" (UID: \"4401a195-a0c5-46b6-9b52-8e83c88ef55d\") " Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.424126 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4401a195-a0c5-46b6-9b52-8e83c88ef55d-kube-api-access-4jdqp" (OuterVolumeSpecName: "kube-api-access-4jdqp") pod "4401a195-a0c5-46b6-9b52-8e83c88ef55d" (UID: "4401a195-a0c5-46b6-9b52-8e83c88ef55d"). InnerVolumeSpecName "kube-api-access-4jdqp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.432298 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4401a195-a0c5-46b6-9b52-8e83c88ef55d" (UID: "4401a195-a0c5-46b6-9b52-8e83c88ef55d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.450682 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4401a195-a0c5-46b6-9b52-8e83c88ef55d" (UID: "4401a195-a0c5-46b6-9b52-8e83c88ef55d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.477365 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-config-data" (OuterVolumeSpecName: "config-data") pod "4401a195-a0c5-46b6-9b52-8e83c88ef55d" (UID: "4401a195-a0c5-46b6-9b52-8e83c88ef55d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.520740 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jdqp\" (UniqueName: \"kubernetes.io/projected/4401a195-a0c5-46b6-9b52-8e83c88ef55d-kube-api-access-4jdqp\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.520786 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.520801 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.520813 4842 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4401a195-a0c5-46b6-9b52-8e83c88ef55d-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.966678 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29381161-b8tsl" Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.966675 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381161-b8tsl" event={"ID":"4401a195-a0c5-46b6-9b52-8e83c88ef55d","Type":"ContainerDied","Data":"5ae2397919a30c40c6b38aa2729b46c8ea409eaf298cbf461a6a389e2b415c39"} Nov 11 14:01:06 crc kubenswrapper[4842]: I1111 14:01:06.966725 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ae2397919a30c40c6b38aa2729b46c8ea409eaf298cbf461a6a389e2b415c39" Nov 11 14:01:07 crc kubenswrapper[4842]: I1111 14:01:07.059813 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:01:07 crc kubenswrapper[4842]: E1111 14:01:07.060094 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:01:12 crc kubenswrapper[4842]: I1111 14:01:12.891412 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 14:01:13 crc kubenswrapper[4842]: I1111 14:01:13.639701 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 14:01:16 crc kubenswrapper[4842]: I1111 14:01:16.447993 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerName="rabbitmq" containerID="cri-o://9a10a06dd7fcf421396f8acc5ff985e3c3263a70b5ae97a1dfc89eec8abf72a6" gracePeriod=604797 Nov 11 14:01:16 crc kubenswrapper[4842]: I1111 14:01:16.917914 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="13087f6b-10cb-421a-b695-84006a81506f" containerName="rabbitmq" containerID="cri-o://836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f" gracePeriod=604797 Nov 11 14:01:18 crc 
kubenswrapper[4842]: I1111 14:01:18.058655 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:01:18 crc kubenswrapper[4842]: E1111 14:01:18.059422 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.075367 4842 generic.go:334] "Generic (PLEG): container finished" podID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerID="9a10a06dd7fcf421396f8acc5ff985e3c3263a70b5ae97a1dfc89eec8abf72a6" exitCode=0 Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.075411 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"dd8c2bc7-52e3-408c-8a72-3e5978b30a42","Type":"ContainerDied","Data":"9a10a06dd7fcf421396f8acc5ff985e3c3263a70b5ae97a1dfc89eec8abf72a6"} Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.075436 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"dd8c2bc7-52e3-408c-8a72-3e5978b30a42","Type":"ContainerDied","Data":"49f92f08acd24b3ecf1c46a7098bdd6e26fbf4183b808bdca3654c58e7147528"} Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.075447 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49f92f08acd24b3ecf1c46a7098bdd6e26fbf4183b808bdca3654c58e7147528" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.278330 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.367810 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-tls\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.367857 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-config-data\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.367881 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44hbq\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-kube-api-access-44hbq\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.367921 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-erlang-cookie\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.368006 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-plugins-conf\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.368020 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-plugins\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.368046 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-server-conf\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.368128 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.368198 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-erlang-cookie-secret\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.368229 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-confd\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: 
\"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.368270 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-pod-info\") pod \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\" (UID: \"dd8c2bc7-52e3-408c-8a72-3e5978b30a42\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.369728 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.371630 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.372357 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.375240 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-kube-api-access-44hbq" (OuterVolumeSpecName: "kube-api-access-44hbq") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "kube-api-access-44hbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.404303 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-pod-info" (OuterVolumeSpecName: "pod-info") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.407587 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.407733 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.418513 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.422580 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-config-data" (OuterVolumeSpecName: "config-data") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472075 4842 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472127 4842 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472143 4842 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-pod-info\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472153 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472163 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472174 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44hbq\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-kube-api-access-44hbq\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472187 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472198 4842 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.472208 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.497797 4842 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" 
Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.517344 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-server-conf" (OuterVolumeSpecName: "server-conf") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.530739 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573095 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13087f6b-10cb-421a-b695-84006a81506f-erlang-cookie-secret\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573191 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-plugins-conf\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573226 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-tls\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573255 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whvlf\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-kube-api-access-whvlf\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573283 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-config-data\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573313 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-plugins\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573350 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573371 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-server-conf\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573452 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13087f6b-10cb-421a-b695-84006a81506f-pod-info\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573516 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-confd\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573566 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-erlang-cookie\") pod \"13087f6b-10cb-421a-b695-84006a81506f\" (UID: \"13087f6b-10cb-421a-b695-84006a81506f\") " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573959 4842 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-server-conf\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.573977 4842 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.574375 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.579820 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.580230 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.580606 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13087f6b-10cb-421a-b695-84006a81506f-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.585311 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.589837 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-kube-api-access-whvlf" (OuterVolumeSpecName: "kube-api-access-whvlf") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "kube-api-access-whvlf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.593489 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.594114 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/13087f6b-10cb-421a-b695-84006a81506f-pod-info" (OuterVolumeSpecName: "pod-info") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.594549 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "dd8c2bc7-52e3-408c-8a72-3e5978b30a42" (UID: "dd8c2bc7-52e3-408c-8a72-3e5978b30a42"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.619006 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-config-data" (OuterVolumeSpecName: "config-data") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676728 4842 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676765 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676777 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whvlf\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-kube-api-access-whvlf\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676791 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676803 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676830 4842 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676839 4842 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/13087f6b-10cb-421a-b695-84006a81506f-pod-info\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676848 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676858 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dd8c2bc7-52e3-408c-8a72-3e5978b30a42-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.676866 4842 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/13087f6b-10cb-421a-b695-84006a81506f-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.682775 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-server-conf" (OuterVolumeSpecName: "server-conf") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.720217 4842 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.730805 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "13087f6b-10cb-421a-b695-84006a81506f" (UID: "13087f6b-10cb-421a-b695-84006a81506f"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.779379 4842 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.779426 4842 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/13087f6b-10cb-421a-b695-84006a81506f-server-conf\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:18 crc kubenswrapper[4842]: I1111 14:01:18.779442 4842 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/13087f6b-10cb-421a-b695-84006a81506f-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.091626 4842 generic.go:334] "Generic (PLEG): container finished" podID="13087f6b-10cb-421a-b695-84006a81506f" containerID="836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f" exitCode=0 Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.091753 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13087f6b-10cb-421a-b695-84006a81506f","Type":"ContainerDied","Data":"836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f"} Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.091797 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"13087f6b-10cb-421a-b695-84006a81506f","Type":"ContainerDied","Data":"15da46f82d0c034a572c091a9b5cfd380013c902c2c6e847e2cd33f797be9c94"} Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.091815 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.091819 4842 scope.go:117] "RemoveContainer" containerID="836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.091895 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.129208 4842 scope.go:117] "RemoveContainer" containerID="8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.147180 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.151185 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.169167 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.177600 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.185911 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: E1111 14:01:19.186504 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4401a195-a0c5-46b6-9b52-8e83c88ef55d" containerName="keystone-cron" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186529 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="4401a195-a0c5-46b6-9b52-8e83c88ef55d" containerName="keystone-cron" Nov 11 14:01:19 crc kubenswrapper[4842]: E1111 14:01:19.186559 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerName="setup-container" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186582 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerName="setup-container" Nov 11 14:01:19 crc kubenswrapper[4842]: E1111 14:01:19.186600 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerName="rabbitmq" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186607 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerName="rabbitmq" Nov 11 14:01:19 crc kubenswrapper[4842]: E1111 14:01:19.186622 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13087f6b-10cb-421a-b695-84006a81506f" containerName="setup-container" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186628 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="13087f6b-10cb-421a-b695-84006a81506f" containerName="setup-container" Nov 11 14:01:19 crc kubenswrapper[4842]: E1111 14:01:19.186655 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13087f6b-10cb-421a-b695-84006a81506f" containerName="rabbitmq" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186662 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="13087f6b-10cb-421a-b695-84006a81506f" containerName="rabbitmq" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186868 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" containerName="rabbitmq" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186892 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="4401a195-a0c5-46b6-9b52-8e83c88ef55d" containerName="keystone-cron" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.186918 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="13087f6b-10cb-421a-b695-84006a81506f" 
containerName="rabbitmq" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.187301 4842 scope.go:117] "RemoveContainer" containerID="836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.188147 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: E1111 14:01:19.193300 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f\": container with ID starting with 836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f not found: ID does not exist" containerID="836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.193353 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f"} err="failed to get container status \"836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f\": rpc error: code = NotFound desc = could not find container \"836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f\": container with ID starting with 836a2da32a3c0c0f4e8eeba299a0f9af49edc6dff0b030b4f77123d8efb0389f not found: ID does not exist" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.193387 4842 scope.go:117] "RemoveContainer" containerID="8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.193922 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.194169 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.194720 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.194797 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.194899 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.195053 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-mt5mv" Nov 11 14:01:19 crc kubenswrapper[4842]: E1111 14:01:19.195355 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf\": container with ID starting with 8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf not found: ID does not exist" containerID="8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.195394 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf"} err="failed to get container status \"8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf\": rpc error: code = NotFound desc = could not find container 
\"8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf\": container with ID starting with 8e6515dda205263bc87f00ed9f9b492c645317d215fc63e3ed6e701d38a2bbdf not found: ID does not exist" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.199619 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.201114 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.211326 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.215854 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.221312 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.221486 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.221583 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-sttpk" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.221633 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.221700 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.221757 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.242256 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.282722 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290541 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290601 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290638 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290667 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290704 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290754 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-config-data\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290784 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290808 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztj9z\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-kube-api-access-ztj9z\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290871 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290895 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/426f2645-ec57-40e9-b41f-3d1b38a42d04-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290919 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2hlj\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-kube-api-access-c2hlj\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290942 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290970 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.290992 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291034 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291076 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291134 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291158 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291201 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291246 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291288 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.291324 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/426f2645-ec57-40e9-b41f-3d1b38a42d04-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.392523 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.392829 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/426f2645-ec57-40e9-b41f-3d1b38a42d04-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.392929 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2hlj\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-kube-api-access-c2hlj\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393034 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393160 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393248 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393356 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393462 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393574 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " 
pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393680 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393785 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393892 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.393991 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394230 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/426f2645-ec57-40e9-b41f-3d1b38a42d04-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394344 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394450 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394538 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394625 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394743 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394865 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-config-data\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.394977 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.395072 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztj9z\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-kube-api-access-ztj9z\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.395724 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.396049 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.396459 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.396619 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.396758 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.396775 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.396785 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.397073 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.397752 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-config-data\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.397992 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.400627 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.402556 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.402643 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/426f2645-ec57-40e9-b41f-3d1b38a42d04-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.416061 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.416582 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.417031 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.417540 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.417710 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztj9z\" (UniqueName: \"kubernetes.io/projected/426f2645-ec57-40e9-b41f-3d1b38a42d04-kube-api-access-ztj9z\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.418321 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/426f2645-ec57-40e9-b41f-3d1b38a42d04-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.429875 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/426f2645-ec57-40e9-b41f-3d1b38a42d04-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.432398 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.440195 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2hlj\" (UniqueName: \"kubernetes.io/projected/d18de6e6-d3e2-41fd-83df-a0d75a1fd978-kube-api-access-c2hlj\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.457545 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-server-0\" (UID: \"d18de6e6-d3e2-41fd-83df-a0d75a1fd978\") " pod="openstack/rabbitmq-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.472828 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"426f2645-ec57-40e9-b41f-3d1b38a42d04\") " pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.537397 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:19 crc kubenswrapper[4842]: I1111 14:01:19.562414 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 11 14:01:20 crc kubenswrapper[4842]: I1111 14:01:20.011247 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 11 14:01:20 crc kubenswrapper[4842]: I1111 14:01:20.075922 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13087f6b-10cb-421a-b695-84006a81506f" path="/var/lib/kubelet/pods/13087f6b-10cb-421a-b695-84006a81506f/volumes" Nov 11 14:01:20 crc kubenswrapper[4842]: I1111 14:01:20.076986 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd8c2bc7-52e3-408c-8a72-3e5978b30a42" path="/var/lib/kubelet/pods/dd8c2bc7-52e3-408c-8a72-3e5978b30a42/volumes" Nov 11 14:01:20 crc kubenswrapper[4842]: I1111 14:01:20.085424 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 11 14:01:20 crc kubenswrapper[4842]: W1111 14:01:20.099686 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd18de6e6_d3e2_41fd_83df_a0d75a1fd978.slice/crio-7f3678e8850b729678221b8fad1be417de9928e6512b08b8e8923f250921f257 WatchSource:0}: Error finding container 7f3678e8850b729678221b8fad1be417de9928e6512b08b8e8923f250921f257: Status 404 returned error can't find the container with id 7f3678e8850b729678221b8fad1be417de9928e6512b08b8e8923f250921f257 Nov 11 14:01:20 crc kubenswrapper[4842]: I1111 14:01:20.118016 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"426f2645-ec57-40e9-b41f-3d1b38a42d04","Type":"ContainerStarted","Data":"4f3672e18b484b3312ff7e5d1f3b6c18bea7960b6db6aa5572cc9c7e89d43abb"} Nov 11 14:01:21 crc kubenswrapper[4842]: I1111 14:01:21.134920 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d18de6e6-d3e2-41fd-83df-a0d75a1fd978","Type":"ContainerStarted","Data":"7f3678e8850b729678221b8fad1be417de9928e6512b08b8e8923f250921f257"} Nov 11 14:01:22 crc kubenswrapper[4842]: I1111 14:01:22.145177 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d18de6e6-d3e2-41fd-83df-a0d75a1fd978","Type":"ContainerStarted","Data":"01e5a851e7bca71690fb76606a9d5558853c1e95d074340d40ab9b9beb185231"} Nov 11 14:01:22 crc kubenswrapper[4842]: I1111 14:01:22.146758 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"426f2645-ec57-40e9-b41f-3d1b38a42d04","Type":"ContainerStarted","Data":"3a5f98fc432e3bc154f9ebcf5d9c8ed2a2cca743d1fc9ecf449473bcee3cc1c5"} Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.503997 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b6dcd6bbc-knwzt"] Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.506247 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.508949 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.536199 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b6dcd6bbc-knwzt"] Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.636250 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-svc\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.636327 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-sb\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.636365 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcv5d\" (UniqueName: \"kubernetes.io/projected/04cbafac-c1c9-427b-9543-d3bc2b87cc74-kube-api-access-lcv5d\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.636389 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-swift-storage-0\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.636528 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-openstack-edpm-ipam\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.636572 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-nb\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.636615 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-config\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.738488 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-svc\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: 
\"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.738559 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-sb\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.738593 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcv5d\" (UniqueName: \"kubernetes.io/projected/04cbafac-c1c9-427b-9543-d3bc2b87cc74-kube-api-access-lcv5d\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.738611 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-swift-storage-0\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.738714 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-openstack-edpm-ipam\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.738753 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-nb\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.739575 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-openstack-edpm-ipam\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.739674 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-swift-storage-0\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.739698 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-sb\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.739760 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-nb\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " 
pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.740297 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-svc\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.740345 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-config\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.740385 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-config\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.762046 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcv5d\" (UniqueName: \"kubernetes.io/projected/04cbafac-c1c9-427b-9543-d3bc2b87cc74-kube-api-access-lcv5d\") pod \"dnsmasq-dns-b6dcd6bbc-knwzt\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:28 crc kubenswrapper[4842]: I1111 14:01:28.825698 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:29 crc kubenswrapper[4842]: I1111 14:01:29.308959 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b6dcd6bbc-knwzt"] Nov 11 14:01:30 crc kubenswrapper[4842]: I1111 14:01:30.233479 4842 generic.go:334] "Generic (PLEG): container finished" podID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerID="532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a" exitCode=0 Nov 11 14:01:30 crc kubenswrapper[4842]: I1111 14:01:30.233524 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" event={"ID":"04cbafac-c1c9-427b-9543-d3bc2b87cc74","Type":"ContainerDied","Data":"532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a"} Nov 11 14:01:30 crc kubenswrapper[4842]: I1111 14:01:30.233803 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" event={"ID":"04cbafac-c1c9-427b-9543-d3bc2b87cc74","Type":"ContainerStarted","Data":"412fa0b0a835ac1b18075313278a0cfd9c33270ef0b97866a570c44876ea390a"} Nov 11 14:01:31 crc kubenswrapper[4842]: I1111 14:01:31.256318 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" event={"ID":"04cbafac-c1c9-427b-9543-d3bc2b87cc74","Type":"ContainerStarted","Data":"ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472"} Nov 11 14:01:31 crc kubenswrapper[4842]: I1111 14:01:31.257353 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:31 crc kubenswrapper[4842]: I1111 14:01:31.279401 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" podStartSLOduration=3.279381763 podStartE2EDuration="3.279381763s" podCreationTimestamp="2025-11-11 14:01:28 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:01:31.27788249 +0000 UTC m=+1901.938172109" watchObservedRunningTime="2025-11-11 14:01:31.279381763 +0000 UTC m=+1901.939671382" Nov 11 14:01:32 crc kubenswrapper[4842]: I1111 14:01:32.059331 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:01:32 crc kubenswrapper[4842]: E1111 14:01:32.059586 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:01:38 crc kubenswrapper[4842]: I1111 14:01:38.827737 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:38 crc kubenswrapper[4842]: I1111 14:01:38.896243 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5555bddbdf-g5rkh"] Nov 11 14:01:38 crc kubenswrapper[4842]: I1111 14:01:38.896783 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" podUID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerName="dnsmasq-dns" containerID="cri-o://7caa17dfc7ebb2b563b83027afcc4dd6733af872342a0a89ffdbe5416b53cefa" gracePeriod=10 Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.116156 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-846db6f4bf-jr7k5"] Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.117899 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.138085 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-846db6f4bf-jr7k5"] Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.241243 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-config\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.241288 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-dns-swift-storage-0\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.241400 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-openstack-edpm-ipam\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.241452 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stk7m\" (UniqueName: \"kubernetes.io/projected/e6ec519f-08c0-4b5d-a698-3ecb9933613f-kube-api-access-stk7m\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.241506 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-dns-svc\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.241575 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-ovsdbserver-nb\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.241593 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-ovsdbserver-sb\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.341025 4842 generic.go:334] "Generic (PLEG): container finished" podID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerID="7caa17dfc7ebb2b563b83027afcc4dd6733af872342a0a89ffdbe5416b53cefa" exitCode=0 Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.341073 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" 
event={"ID":"aa32bfef-1280-45cd-bcc7-abaa0485335a","Type":"ContainerDied","Data":"7caa17dfc7ebb2b563b83027afcc4dd6733af872342a0a89ffdbe5416b53cefa"} Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.343332 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-config\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.343365 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-dns-swift-storage-0\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.343471 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-openstack-edpm-ipam\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.343512 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stk7m\" (UniqueName: \"kubernetes.io/projected/e6ec519f-08c0-4b5d-a698-3ecb9933613f-kube-api-access-stk7m\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.343562 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-dns-svc\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.343640 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-ovsdbserver-nb\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.343661 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-ovsdbserver-sb\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.344443 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-ovsdbserver-sb\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.345007 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-dns-svc\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: 
\"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.345328 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-dns-swift-storage-0\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.345463 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-config\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.345643 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-ovsdbserver-nb\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.345910 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/e6ec519f-08c0-4b5d-a698-3ecb9933613f-openstack-edpm-ipam\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.369233 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stk7m\" (UniqueName: \"kubernetes.io/projected/e6ec519f-08c0-4b5d-a698-3ecb9933613f-kube-api-access-stk7m\") pod \"dnsmasq-dns-846db6f4bf-jr7k5\" (UID: \"e6ec519f-08c0-4b5d-a698-3ecb9933613f\") " pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.454543 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.588173 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.649054 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-nb\") pod \"aa32bfef-1280-45cd-bcc7-abaa0485335a\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.649195 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfdzt\" (UniqueName: \"kubernetes.io/projected/aa32bfef-1280-45cd-bcc7-abaa0485335a-kube-api-access-zfdzt\") pod \"aa32bfef-1280-45cd-bcc7-abaa0485335a\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.649239 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-svc\") pod \"aa32bfef-1280-45cd-bcc7-abaa0485335a\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.649294 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-swift-storage-0\") pod \"aa32bfef-1280-45cd-bcc7-abaa0485335a\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.649350 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-sb\") pod \"aa32bfef-1280-45cd-bcc7-abaa0485335a\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.649424 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-config\") pod \"aa32bfef-1280-45cd-bcc7-abaa0485335a\" (UID: \"aa32bfef-1280-45cd-bcc7-abaa0485335a\") " Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.654329 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa32bfef-1280-45cd-bcc7-abaa0485335a-kube-api-access-zfdzt" (OuterVolumeSpecName: "kube-api-access-zfdzt") pod "aa32bfef-1280-45cd-bcc7-abaa0485335a" (UID: "aa32bfef-1280-45cd-bcc7-abaa0485335a"). InnerVolumeSpecName "kube-api-access-zfdzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.703430 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aa32bfef-1280-45cd-bcc7-abaa0485335a" (UID: "aa32bfef-1280-45cd-bcc7-abaa0485335a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.751468 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "aa32bfef-1280-45cd-bcc7-abaa0485335a" (UID: "aa32bfef-1280-45cd-bcc7-abaa0485335a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.752031 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.752055 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.752064 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfdzt\" (UniqueName: \"kubernetes.io/projected/aa32bfef-1280-45cd-bcc7-abaa0485335a-kube-api-access-zfdzt\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.760980 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-config" (OuterVolumeSpecName: "config") pod "aa32bfef-1280-45cd-bcc7-abaa0485335a" (UID: "aa32bfef-1280-45cd-bcc7-abaa0485335a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.798980 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aa32bfef-1280-45cd-bcc7-abaa0485335a" (UID: "aa32bfef-1280-45cd-bcc7-abaa0485335a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.808363 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-846db6f4bf-jr7k5"] Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.811892 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aa32bfef-1280-45cd-bcc7-abaa0485335a" (UID: "aa32bfef-1280-45cd-bcc7-abaa0485335a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.857730 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.857776 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-config\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:39 crc kubenswrapper[4842]: I1111 14:01:39.857802 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa32bfef-1280-45cd-bcc7-abaa0485335a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.350872 4842 generic.go:334] "Generic (PLEG): container finished" podID="e6ec519f-08c0-4b5d-a698-3ecb9933613f" containerID="435c294bde00e9e1fabb106d444aad7e8fc7486228fb9538ef3f42eeb465e36d" exitCode=0 Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.351240 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" event={"ID":"e6ec519f-08c0-4b5d-a698-3ecb9933613f","Type":"ContainerDied","Data":"435c294bde00e9e1fabb106d444aad7e8fc7486228fb9538ef3f42eeb465e36d"} Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.351267 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" event={"ID":"e6ec519f-08c0-4b5d-a698-3ecb9933613f","Type":"ContainerStarted","Data":"7dfc6146b2e94adcbd2f29d3d2fe2af388c96016f2307f9d6744dd5d453385ca"} Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.352937 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" event={"ID":"aa32bfef-1280-45cd-bcc7-abaa0485335a","Type":"ContainerDied","Data":"76a47178cd53d2601a44a9ba36bd5d1adb4bdaaffef3a0f7d920c744dbe56115"} Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.352975 4842 scope.go:117] "RemoveContainer" containerID="7caa17dfc7ebb2b563b83027afcc4dd6733af872342a0a89ffdbe5416b53cefa" Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.353015 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5555bddbdf-g5rkh" Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.398005 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5555bddbdf-g5rkh"] Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.408925 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5555bddbdf-g5rkh"] Nov 11 14:01:40 crc kubenswrapper[4842]: I1111 14:01:40.530321 4842 scope.go:117] "RemoveContainer" containerID="49f84e3057d46c74c17574d89d867a97437dd38f7817c8d9b0272214ba4d06a1" Nov 11 14:01:41 crc kubenswrapper[4842]: I1111 14:01:41.366064 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" event={"ID":"e6ec519f-08c0-4b5d-a698-3ecb9933613f","Type":"ContainerStarted","Data":"3574ccbb5466ea88da6ac2428f8a01a6655cffcbe35488ca34b0e6db7f61ed5a"} Nov 11 14:01:41 crc kubenswrapper[4842]: I1111 14:01:41.366366 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:41 crc kubenswrapper[4842]: I1111 14:01:41.384150 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" podStartSLOduration=2.384130229 podStartE2EDuration="2.384130229s" podCreationTimestamp="2025-11-11 14:01:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:01:41.381370641 +0000 UTC m=+1912.041660260" watchObservedRunningTime="2025-11-11 14:01:41.384130229 +0000 UTC m=+1912.044419848" Nov 11 14:01:42 crc kubenswrapper[4842]: I1111 14:01:42.070723 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa32bfef-1280-45cd-bcc7-abaa0485335a" path="/var/lib/kubelet/pods/aa32bfef-1280-45cd-bcc7-abaa0485335a/volumes" Nov 11 14:01:45 crc kubenswrapper[4842]: I1111 14:01:45.058942 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:01:45 crc kubenswrapper[4842]: E1111 14:01:45.059439 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:01:49 crc kubenswrapper[4842]: I1111 14:01:49.457422 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-846db6f4bf-jr7k5" Nov 11 14:01:49 crc kubenswrapper[4842]: I1111 14:01:49.557940 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b6dcd6bbc-knwzt"] Nov 11 14:01:49 crc kubenswrapper[4842]: I1111 14:01:49.558393 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" podUID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerName="dnsmasq-dns" containerID="cri-o://ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472" gracePeriod=10 Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.102506 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.176965 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-svc\") pod \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.177056 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-sb\") pod \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.177821 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-swift-storage-0\") pod \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.177903 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcv5d\" (UniqueName: \"kubernetes.io/projected/04cbafac-c1c9-427b-9543-d3bc2b87cc74-kube-api-access-lcv5d\") pod \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.177935 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-openstack-edpm-ipam\") pod \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.178026 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-config\") pod \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.178135 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-nb\") pod \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\" (UID: \"04cbafac-c1c9-427b-9543-d3bc2b87cc74\") " Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.183278 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04cbafac-c1c9-427b-9543-d3bc2b87cc74-kube-api-access-lcv5d" (OuterVolumeSpecName: "kube-api-access-lcv5d") pod "04cbafac-c1c9-427b-9543-d3bc2b87cc74" (UID: "04cbafac-c1c9-427b-9543-d3bc2b87cc74"). InnerVolumeSpecName "kube-api-access-lcv5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.238661 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "04cbafac-c1c9-427b-9543-d3bc2b87cc74" (UID: "04cbafac-c1c9-427b-9543-d3bc2b87cc74"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.240063 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-config" (OuterVolumeSpecName: "config") pod "04cbafac-c1c9-427b-9543-d3bc2b87cc74" (UID: "04cbafac-c1c9-427b-9543-d3bc2b87cc74"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.245192 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "04cbafac-c1c9-427b-9543-d3bc2b87cc74" (UID: "04cbafac-c1c9-427b-9543-d3bc2b87cc74"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.246197 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "04cbafac-c1c9-427b-9543-d3bc2b87cc74" (UID: "04cbafac-c1c9-427b-9543-d3bc2b87cc74"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.255820 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "04cbafac-c1c9-427b-9543-d3bc2b87cc74" (UID: "04cbafac-c1c9-427b-9543-d3bc2b87cc74"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.274705 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "04cbafac-c1c9-427b-9543-d3bc2b87cc74" (UID: "04cbafac-c1c9-427b-9543-d3bc2b87cc74"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.281211 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.281250 4842 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.281265 4842 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.281278 4842 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.281292 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcv5d\" (UniqueName: \"kubernetes.io/projected/04cbafac-c1c9-427b-9543-d3bc2b87cc74-kube-api-access-lcv5d\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.281305 4842 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.281319 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04cbafac-c1c9-427b-9543-d3bc2b87cc74-config\") on node \"crc\" DevicePath \"\"" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.444706 4842 generic.go:334] "Generic (PLEG): container finished" podID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerID="ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472" exitCode=0 Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.444745 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" event={"ID":"04cbafac-c1c9-427b-9543-d3bc2b87cc74","Type":"ContainerDied","Data":"ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472"} Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.444771 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" event={"ID":"04cbafac-c1c9-427b-9543-d3bc2b87cc74","Type":"ContainerDied","Data":"412fa0b0a835ac1b18075313278a0cfd9c33270ef0b97866a570c44876ea390a"} Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.444787 4842 scope.go:117] "RemoveContainer" containerID="ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.444908 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b6dcd6bbc-knwzt" Nov 11 14:01:50 crc kubenswrapper[4842]: E1111 14:01:50.479838 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04cbafac_c1c9_427b_9543_d3bc2b87cc74.slice/crio-412fa0b0a835ac1b18075313278a0cfd9c33270ef0b97866a570c44876ea390a\": RecentStats: unable to find data in memory cache]" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.484366 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b6dcd6bbc-knwzt"] Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.485221 4842 scope.go:117] "RemoveContainer" containerID="532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.492781 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b6dcd6bbc-knwzt"] Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.505574 4842 scope.go:117] "RemoveContainer" containerID="ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472" Nov 11 14:01:50 crc kubenswrapper[4842]: E1111 14:01:50.505915 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472\": container with ID starting with ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472 not found: ID does not exist" containerID="ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.505948 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472"} err="failed to get container status \"ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472\": rpc error: code = NotFound desc = could not find container \"ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472\": container with ID starting with ec30b53bd286d81765ab261500f7770dbe9ff8ab074932fbaadca98c19c68472 not found: ID does not exist" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.505970 4842 scope.go:117] "RemoveContainer" containerID="532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a" Nov 11 14:01:50 crc kubenswrapper[4842]: E1111 14:01:50.506279 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a\": container with ID starting with 532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a not found: ID does not exist" containerID="532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a" Nov 11 14:01:50 crc kubenswrapper[4842]: I1111 14:01:50.506301 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a"} err="failed to get container status \"532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a\": rpc error: code = NotFound desc = could not find container \"532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a\": container with ID starting with 532815f7e7cbbc0f95751941613296e69f2bc75e2b925d33b0342c4e22ef331a not found: ID does not exist" Nov 11 14:01:52 crc kubenswrapper[4842]: I1111 14:01:52.069570 4842 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" path="/var/lib/kubelet/pods/04cbafac-c1c9-427b-9543-d3bc2b87cc74/volumes" Nov 11 14:01:53 crc kubenswrapper[4842]: I1111 14:01:53.476939 4842 generic.go:334] "Generic (PLEG): container finished" podID="d18de6e6-d3e2-41fd-83df-a0d75a1fd978" containerID="01e5a851e7bca71690fb76606a9d5558853c1e95d074340d40ab9b9beb185231" exitCode=0 Nov 11 14:01:53 crc kubenswrapper[4842]: I1111 14:01:53.476982 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d18de6e6-d3e2-41fd-83df-a0d75a1fd978","Type":"ContainerDied","Data":"01e5a851e7bca71690fb76606a9d5558853c1e95d074340d40ab9b9beb185231"} Nov 11 14:01:54 crc kubenswrapper[4842]: I1111 14:01:54.487299 4842 generic.go:334] "Generic (PLEG): container finished" podID="426f2645-ec57-40e9-b41f-3d1b38a42d04" containerID="3a5f98fc432e3bc154f9ebcf5d9c8ed2a2cca743d1fc9ecf449473bcee3cc1c5" exitCode=0 Nov 11 14:01:54 crc kubenswrapper[4842]: I1111 14:01:54.487370 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"426f2645-ec57-40e9-b41f-3d1b38a42d04","Type":"ContainerDied","Data":"3a5f98fc432e3bc154f9ebcf5d9c8ed2a2cca743d1fc9ecf449473bcee3cc1c5"} Nov 11 14:01:54 crc kubenswrapper[4842]: I1111 14:01:54.490374 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d18de6e6-d3e2-41fd-83df-a0d75a1fd978","Type":"ContainerStarted","Data":"afc6e4615aba13dabdda39b89e2638101db4112a811136df94a6e955ed55cfdf"} Nov 11 14:01:54 crc kubenswrapper[4842]: I1111 14:01:54.490662 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 11 14:01:54 crc kubenswrapper[4842]: I1111 14:01:54.550692 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=35.550668246 podStartE2EDuration="35.550668246s" podCreationTimestamp="2025-11-11 14:01:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:01:54.544717208 +0000 UTC m=+1925.205006847" watchObservedRunningTime="2025-11-11 14:01:54.550668246 +0000 UTC m=+1925.210957865" Nov 11 14:01:55 crc kubenswrapper[4842]: I1111 14:01:55.501975 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"426f2645-ec57-40e9-b41f-3d1b38a42d04","Type":"ContainerStarted","Data":"c4ec25545667f15b442f9e11f2e467fe295d437b97bc1f3f9cd3bea3da8e650c"} Nov 11 14:01:55 crc kubenswrapper[4842]: I1111 14:01:55.502344 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:01:55 crc kubenswrapper[4842]: I1111 14:01:55.535880 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.535862917 podStartE2EDuration="36.535862917s" podCreationTimestamp="2025-11-11 14:01:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:01:55.533002926 +0000 UTC m=+1926.193292545" watchObservedRunningTime="2025-11-11 14:01:55.535862917 +0000 UTC m=+1926.196152536" Nov 11 14:01:59 crc kubenswrapper[4842]: I1111 14:01:59.059724 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:01:59 
crc kubenswrapper[4842]: E1111 14:01:59.061437 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.594855 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2"] Nov 11 14:02:07 crc kubenswrapper[4842]: E1111 14:02:07.595898 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerName="init" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.595916 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerName="init" Nov 11 14:02:07 crc kubenswrapper[4842]: E1111 14:02:07.595935 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerName="dnsmasq-dns" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.595942 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerName="dnsmasq-dns" Nov 11 14:02:07 crc kubenswrapper[4842]: E1111 14:02:07.595962 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerName="dnsmasq-dns" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.595968 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerName="dnsmasq-dns" Nov 11 14:02:07 crc kubenswrapper[4842]: E1111 14:02:07.595978 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerName="init" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.595984 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerName="init" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.596216 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="04cbafac-c1c9-427b-9543-d3bc2b87cc74" containerName="dnsmasq-dns" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.596237 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa32bfef-1280-45cd-bcc7-abaa0485335a" containerName="dnsmasq-dns" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.596975 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.600260 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.600756 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.600849 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.602369 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.615018 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2"] Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.715309 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.715394 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77z5c\" (UniqueName: \"kubernetes.io/projected/50a93092-7567-4563-a8cc-9393aaf10eae-kube-api-access-77z5c\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.715440 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.715486 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.818165 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.818257 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77z5c\" (UniqueName: \"kubernetes.io/projected/50a93092-7567-4563-a8cc-9393aaf10eae-kube-api-access-77z5c\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.818304 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.818324 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.826257 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.829692 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.830164 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.844576 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77z5c\" (UniqueName: \"kubernetes.io/projected/50a93092-7567-4563-a8cc-9393aaf10eae-kube-api-access-77z5c\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:07 crc kubenswrapper[4842]: I1111 14:02:07.921452 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:08 crc kubenswrapper[4842]: W1111 14:02:08.510927 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50a93092_7567_4563_a8cc_9393aaf10eae.slice/crio-9121c103dfd19f2cbead96f33b828cd9ead22d4f39665215f78f6e240e1805bd WatchSource:0}: Error finding container 9121c103dfd19f2cbead96f33b828cd9ead22d4f39665215f78f6e240e1805bd: Status 404 returned error can't find the container with id 9121c103dfd19f2cbead96f33b828cd9ead22d4f39665215f78f6e240e1805bd Nov 11 14:02:08 crc kubenswrapper[4842]: I1111 14:02:08.512700 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2"] Nov 11 14:02:08 crc kubenswrapper[4842]: I1111 14:02:08.513383 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:02:08 crc kubenswrapper[4842]: I1111 14:02:08.613660 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" event={"ID":"50a93092-7567-4563-a8cc-9393aaf10eae","Type":"ContainerStarted","Data":"9121c103dfd19f2cbead96f33b828cd9ead22d4f39665215f78f6e240e1805bd"} Nov 11 14:02:09 crc kubenswrapper[4842]: I1111 14:02:09.541374 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 11 14:02:09 crc kubenswrapper[4842]: I1111 14:02:09.565391 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 11 14:02:12 crc kubenswrapper[4842]: I1111 14:02:12.059424 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:02:12 crc kubenswrapper[4842]: E1111 14:02:12.060299 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:02:18 crc kubenswrapper[4842]: I1111 14:02:18.714644 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" event={"ID":"50a93092-7567-4563-a8cc-9393aaf10eae","Type":"ContainerStarted","Data":"57853a3e2fa5ef70dca19b838668b0b0e6398c80483a665f258a82d4a59eedb0"} Nov 11 14:02:18 crc kubenswrapper[4842]: I1111 14:02:18.733624 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" podStartSLOduration=2.180218385 podStartE2EDuration="11.73360149s" podCreationTimestamp="2025-11-11 14:02:07 +0000 UTC" firstStartedPulling="2025-11-11 14:02:08.513193567 +0000 UTC m=+1939.173483186" lastFinishedPulling="2025-11-11 14:02:18.066576672 +0000 UTC m=+1948.726866291" observedRunningTime="2025-11-11 14:02:18.729141515 +0000 UTC m=+1949.389431134" watchObservedRunningTime="2025-11-11 14:02:18.73360149 +0000 UTC m=+1949.393891109" Nov 11 14:02:26 crc kubenswrapper[4842]: I1111 14:02:26.060938 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:02:26 crc kubenswrapper[4842]: 
E1111 14:02:26.061814 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:02:30 crc kubenswrapper[4842]: I1111 14:02:30.823156 4842 generic.go:334] "Generic (PLEG): container finished" podID="50a93092-7567-4563-a8cc-9393aaf10eae" containerID="57853a3e2fa5ef70dca19b838668b0b0e6398c80483a665f258a82d4a59eedb0" exitCode=0 Nov 11 14:02:30 crc kubenswrapper[4842]: I1111 14:02:30.823241 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" event={"ID":"50a93092-7567-4563-a8cc-9393aaf10eae","Type":"ContainerDied","Data":"57853a3e2fa5ef70dca19b838668b0b0e6398c80483a665f258a82d4a59eedb0"} Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.299740 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.427259 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-inventory\") pod \"50a93092-7567-4563-a8cc-9393aaf10eae\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.427339 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-repo-setup-combined-ca-bundle\") pod \"50a93092-7567-4563-a8cc-9393aaf10eae\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.427384 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77z5c\" (UniqueName: \"kubernetes.io/projected/50a93092-7567-4563-a8cc-9393aaf10eae-kube-api-access-77z5c\") pod \"50a93092-7567-4563-a8cc-9393aaf10eae\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.427426 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-ssh-key\") pod \"50a93092-7567-4563-a8cc-9393aaf10eae\" (UID: \"50a93092-7567-4563-a8cc-9393aaf10eae\") " Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.433807 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "50a93092-7567-4563-a8cc-9393aaf10eae" (UID: "50a93092-7567-4563-a8cc-9393aaf10eae"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.435977 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50a93092-7567-4563-a8cc-9393aaf10eae-kube-api-access-77z5c" (OuterVolumeSpecName: "kube-api-access-77z5c") pod "50a93092-7567-4563-a8cc-9393aaf10eae" (UID: "50a93092-7567-4563-a8cc-9393aaf10eae"). 
InnerVolumeSpecName "kube-api-access-77z5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.459272 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-inventory" (OuterVolumeSpecName: "inventory") pod "50a93092-7567-4563-a8cc-9393aaf10eae" (UID: "50a93092-7567-4563-a8cc-9393aaf10eae"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.461662 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "50a93092-7567-4563-a8cc-9393aaf10eae" (UID: "50a93092-7567-4563-a8cc-9393aaf10eae"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.530151 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.530182 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.530192 4842 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a93092-7567-4563-a8cc-9393aaf10eae-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.530202 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77z5c\" (UniqueName: \"kubernetes.io/projected/50a93092-7567-4563-a8cc-9393aaf10eae-kube-api-access-77z5c\") on node \"crc\" DevicePath \"\"" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.841952 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" event={"ID":"50a93092-7567-4563-a8cc-9393aaf10eae","Type":"ContainerDied","Data":"9121c103dfd19f2cbead96f33b828cd9ead22d4f39665215f78f6e240e1805bd"} Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.842473 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9121c103dfd19f2cbead96f33b828cd9ead22d4f39665215f78f6e240e1805bd" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.842038 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.914293 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4"] Nov 11 14:02:32 crc kubenswrapper[4842]: E1111 14:02:32.914803 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50a93092-7567-4563-a8cc-9393aaf10eae" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.914827 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="50a93092-7567-4563-a8cc-9393aaf10eae" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.915068 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="50a93092-7567-4563-a8cc-9393aaf10eae" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.915815 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.919052 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.919880 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.920115 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.920286 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:02:32 crc kubenswrapper[4842]: I1111 14:02:32.923633 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4"] Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.039540 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.039596 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dtbc\" (UniqueName: \"kubernetes.io/projected/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-kube-api-access-2dtbc\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.039644 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.141509 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.141570 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dtbc\" (UniqueName: \"kubernetes.io/projected/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-kube-api-access-2dtbc\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.141972 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.145599 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.145809 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.157497 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dtbc\" (UniqueName: \"kubernetes.io/projected/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-kube-api-access-2dtbc\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wz4g4\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.236707 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.802764 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4"] Nov 11 14:02:33 crc kubenswrapper[4842]: W1111 14:02:33.805079 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc8e3c3e6_73ba_490d_b0f5_c99a557f7129.slice/crio-74ccc722f25784079020264cef93e6c55074356e495d28ed241dadfdd32294db WatchSource:0}: Error finding container 74ccc722f25784079020264cef93e6c55074356e495d28ed241dadfdd32294db: Status 404 returned error can't find the container with id 74ccc722f25784079020264cef93e6c55074356e495d28ed241dadfdd32294db Nov 11 14:02:33 crc kubenswrapper[4842]: I1111 14:02:33.851952 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" event={"ID":"c8e3c3e6-73ba-490d-b0f5-c99a557f7129","Type":"ContainerStarted","Data":"74ccc722f25784079020264cef93e6c55074356e495d28ed241dadfdd32294db"} Nov 11 14:02:34 crc kubenswrapper[4842]: I1111 14:02:34.863881 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" event={"ID":"c8e3c3e6-73ba-490d-b0f5-c99a557f7129","Type":"ContainerStarted","Data":"e4f819eaa003a4b97b3c564b19d0edfad8a6e9ac74de0cb8b6397110c91d2615"} Nov 11 14:02:34 crc kubenswrapper[4842]: I1111 14:02:34.882611 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" podStartSLOduration=2.433004712 podStartE2EDuration="2.882587748s" podCreationTimestamp="2025-11-11 14:02:32 +0000 UTC" firstStartedPulling="2025-11-11 14:02:33.809325178 +0000 UTC m=+1964.469614797" lastFinishedPulling="2025-11-11 14:02:34.258908214 +0000 UTC m=+1964.919197833" observedRunningTime="2025-11-11 14:02:34.88088902 +0000 UTC m=+1965.541178639" watchObservedRunningTime="2025-11-11 14:02:34.882587748 +0000 UTC m=+1965.542877367" Nov 11 14:02:37 crc kubenswrapper[4842]: I1111 14:02:37.892924 4842 generic.go:334] "Generic (PLEG): container finished" podID="c8e3c3e6-73ba-490d-b0f5-c99a557f7129" containerID="e4f819eaa003a4b97b3c564b19d0edfad8a6e9ac74de0cb8b6397110c91d2615" exitCode=0 Nov 11 14:02:37 crc kubenswrapper[4842]: I1111 14:02:37.892989 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" event={"ID":"c8e3c3e6-73ba-490d-b0f5-c99a557f7129","Type":"ContainerDied","Data":"e4f819eaa003a4b97b3c564b19d0edfad8a6e9ac74de0cb8b6397110c91d2615"} Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.059390 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:02:39 crc kubenswrapper[4842]: E1111 14:02:39.059737 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.334942 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.376488 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-ssh-key\") pod \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.376562 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dtbc\" (UniqueName: \"kubernetes.io/projected/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-kube-api-access-2dtbc\") pod \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.376692 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-inventory\") pod \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\" (UID: \"c8e3c3e6-73ba-490d-b0f5-c99a557f7129\") " Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.382166 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-kube-api-access-2dtbc" (OuterVolumeSpecName: "kube-api-access-2dtbc") pod "c8e3c3e6-73ba-490d-b0f5-c99a557f7129" (UID: "c8e3c3e6-73ba-490d-b0f5-c99a557f7129"). InnerVolumeSpecName "kube-api-access-2dtbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.403757 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c8e3c3e6-73ba-490d-b0f5-c99a557f7129" (UID: "c8e3c3e6-73ba-490d-b0f5-c99a557f7129"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.408278 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-inventory" (OuterVolumeSpecName: "inventory") pod "c8e3c3e6-73ba-490d-b0f5-c99a557f7129" (UID: "c8e3c3e6-73ba-490d-b0f5-c99a557f7129"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.479331 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.479365 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dtbc\" (UniqueName: \"kubernetes.io/projected/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-kube-api-access-2dtbc\") on node \"crc\" DevicePath \"\"" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.479378 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8e3c3e6-73ba-490d-b0f5-c99a557f7129-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.918570 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" event={"ID":"c8e3c3e6-73ba-490d-b0f5-c99a557f7129","Type":"ContainerDied","Data":"74ccc722f25784079020264cef93e6c55074356e495d28ed241dadfdd32294db"} Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.918624 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74ccc722f25784079020264cef93e6c55074356e495d28ed241dadfdd32294db" Nov 11 14:02:39 crc kubenswrapper[4842]: I1111 14:02:39.918626 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wz4g4" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.024513 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2"] Nov 11 14:02:40 crc kubenswrapper[4842]: E1111 14:02:40.025353 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8e3c3e6-73ba-490d-b0f5-c99a557f7129" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.025372 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8e3c3e6-73ba-490d-b0f5-c99a557f7129" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.025794 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8e3c3e6-73ba-490d-b0f5-c99a557f7129" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.028077 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.037443 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.037680 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.037857 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.038031 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.053294 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2"] Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.091968 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.092081 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.092151 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.092320 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m68xv\" (UniqueName: \"kubernetes.io/projected/cbeea580-daef-4e97-898b-c194a52a4e97-kube-api-access-m68xv\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.193956 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.194013 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-ssh-key\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.194121 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m68xv\" (UniqueName: \"kubernetes.io/projected/cbeea580-daef-4e97-898b-c194a52a4e97-kube-api-access-m68xv\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.194205 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.198490 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.198650 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.206941 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.209726 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m68xv\" (UniqueName: \"kubernetes.io/projected/cbeea580-daef-4e97-898b-c194a52a4e97-kube-api-access-m68xv\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.355324 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.857640 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2"] Nov 11 14:02:40 crc kubenswrapper[4842]: I1111 14:02:40.928758 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" event={"ID":"cbeea580-daef-4e97-898b-c194a52a4e97","Type":"ContainerStarted","Data":"decefdcb3197f149ed8ca0f78362a75cbad84cf4ea190f6b145d41d4a7e049b5"} Nov 11 14:02:41 crc kubenswrapper[4842]: I1111 14:02:41.962013 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" event={"ID":"cbeea580-daef-4e97-898b-c194a52a4e97","Type":"ContainerStarted","Data":"4417c892eb2a419d2cfc502280f6d9142363c6a9c58d6642f9638b716596b8a1"} Nov 11 14:02:41 crc kubenswrapper[4842]: I1111 14:02:41.983647 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" podStartSLOduration=2.576629241 podStartE2EDuration="2.983627583s" podCreationTimestamp="2025-11-11 14:02:39 +0000 UTC" firstStartedPulling="2025-11-11 14:02:40.8610507 +0000 UTC m=+1971.521340319" lastFinishedPulling="2025-11-11 14:02:41.268049042 +0000 UTC m=+1971.928338661" observedRunningTime="2025-11-11 14:02:41.978651352 +0000 UTC m=+1972.638940971" watchObservedRunningTime="2025-11-11 14:02:41.983627583 +0000 UTC m=+1972.643917202" Nov 11 14:02:51 crc kubenswrapper[4842]: I1111 14:02:51.059727 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:02:52 crc kubenswrapper[4842]: I1111 14:02:52.096016 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"368d16971bd4982dddf56e9a7531c321d52274d85215ec7740b6521007561a70"} Nov 11 14:02:58 crc kubenswrapper[4842]: I1111 14:02:58.139986 4842 scope.go:117] "RemoveContainer" containerID="d6967b776d91d6c0f11a25b3c2eac73ec5c2310ad6fce5ea72129cda25a38a7a" Nov 11 14:02:58 crc kubenswrapper[4842]: I1111 14:02:58.172355 4842 scope.go:117] "RemoveContainer" containerID="9a10a06dd7fcf421396f8acc5ff985e3c3263a70b5ae97a1dfc89eec8abf72a6" Nov 11 14:02:58 crc kubenswrapper[4842]: I1111 14:02:58.212892 4842 scope.go:117] "RemoveContainer" containerID="b7c2238f1340635d5115dd9fe56f38cf43d59742baa12c7b6cb17a3e757a55bb" Nov 11 14:03:58 crc kubenswrapper[4842]: I1111 14:03:58.290466 4842 scope.go:117] "RemoveContainer" containerID="78398445b8e7bfb04472702d7713c57265db0714a60254fbea76db24d4d62c49" Nov 11 14:03:58 crc kubenswrapper[4842]: I1111 14:03:58.460071 4842 scope.go:117] "RemoveContainer" containerID="3eff7858cd7523f0719e1231457101f0d7d42f75444d45843e134aee0e3b38b9" Nov 11 14:03:58 crc kubenswrapper[4842]: I1111 14:03:58.502333 4842 scope.go:117] "RemoveContainer" containerID="e919adb8a44cb6ca0930a1f361a90dc108e936104f9720da3a7a49cd2d6bf57a" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.665361 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dhq59"] Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.669301 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.675645 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dhq59"] Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.773514 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-catalog-content\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.773728 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2jxr\" (UniqueName: \"kubernetes.io/projected/ec4b38b6-28f0-4bc5-93ed-057de333e064-kube-api-access-m2jxr\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.773768 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-utilities\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.875453 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2jxr\" (UniqueName: \"kubernetes.io/projected/ec4b38b6-28f0-4bc5-93ed-057de333e064-kube-api-access-m2jxr\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.875517 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-utilities\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.875598 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-catalog-content\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.876006 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-utilities\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.876248 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-catalog-content\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.907214 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-m2jxr\" (UniqueName: \"kubernetes.io/projected/ec4b38b6-28f0-4bc5-93ed-057de333e064-kube-api-access-m2jxr\") pod \"redhat-operators-dhq59\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:10 crc kubenswrapper[4842]: I1111 14:04:10.994945 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:11 crc kubenswrapper[4842]: I1111 14:04:11.476618 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dhq59"] Nov 11 14:04:11 crc kubenswrapper[4842]: I1111 14:04:11.820779 4842 generic.go:334] "Generic (PLEG): container finished" podID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerID="2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1" exitCode=0 Nov 11 14:04:11 crc kubenswrapper[4842]: I1111 14:04:11.820820 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dhq59" event={"ID":"ec4b38b6-28f0-4bc5-93ed-057de333e064","Type":"ContainerDied","Data":"2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1"} Nov 11 14:04:11 crc kubenswrapper[4842]: I1111 14:04:11.820846 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dhq59" event={"ID":"ec4b38b6-28f0-4bc5-93ed-057de333e064","Type":"ContainerStarted","Data":"6a63e3fb4061d1929c7ea48086793192498a21c288266bb15ec543c2fac896a5"} Nov 11 14:04:13 crc kubenswrapper[4842]: I1111 14:04:13.842662 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dhq59" event={"ID":"ec4b38b6-28f0-4bc5-93ed-057de333e064","Type":"ContainerStarted","Data":"4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d"} Nov 11 14:04:14 crc kubenswrapper[4842]: I1111 14:04:14.853453 4842 generic.go:334] "Generic (PLEG): container finished" podID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerID="4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d" exitCode=0 Nov 11 14:04:14 crc kubenswrapper[4842]: I1111 14:04:14.853546 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dhq59" event={"ID":"ec4b38b6-28f0-4bc5-93ed-057de333e064","Type":"ContainerDied","Data":"4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d"} Nov 11 14:04:15 crc kubenswrapper[4842]: I1111 14:04:15.866307 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dhq59" event={"ID":"ec4b38b6-28f0-4bc5-93ed-057de333e064","Type":"ContainerStarted","Data":"ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de"} Nov 11 14:04:15 crc kubenswrapper[4842]: I1111 14:04:15.888648 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dhq59" podStartSLOduration=2.149900733 podStartE2EDuration="5.888627806s" podCreationTimestamp="2025-11-11 14:04:10 +0000 UTC" firstStartedPulling="2025-11-11 14:04:11.822866883 +0000 UTC m=+2062.483156502" lastFinishedPulling="2025-11-11 14:04:15.561593946 +0000 UTC m=+2066.221883575" observedRunningTime="2025-11-11 14:04:15.883507484 +0000 UTC m=+2066.543797123" watchObservedRunningTime="2025-11-11 14:04:15.888627806 +0000 UTC m=+2066.548917425" Nov 11 14:04:20 crc kubenswrapper[4842]: I1111 14:04:20.995757 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dhq59" 
Nov 11 14:04:20 crc kubenswrapper[4842]: I1111 14:04:20.996308 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:21 crc kubenswrapper[4842]: I1111 14:04:21.044020 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:21 crc kubenswrapper[4842]: I1111 14:04:21.970720 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:22 crc kubenswrapper[4842]: I1111 14:04:22.021573 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dhq59"] Nov 11 14:04:23 crc kubenswrapper[4842]: I1111 14:04:23.938368 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dhq59" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="registry-server" containerID="cri-o://ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de" gracePeriod=2 Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.382366 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.571410 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-catalog-content\") pod \"ec4b38b6-28f0-4bc5-93ed-057de333e064\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.571790 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2jxr\" (UniqueName: \"kubernetes.io/projected/ec4b38b6-28f0-4bc5-93ed-057de333e064-kube-api-access-m2jxr\") pod \"ec4b38b6-28f0-4bc5-93ed-057de333e064\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.571837 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-utilities\") pod \"ec4b38b6-28f0-4bc5-93ed-057de333e064\" (UID: \"ec4b38b6-28f0-4bc5-93ed-057de333e064\") " Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.572792 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-utilities" (OuterVolumeSpecName: "utilities") pod "ec4b38b6-28f0-4bc5-93ed-057de333e064" (UID: "ec4b38b6-28f0-4bc5-93ed-057de333e064"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.578307 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec4b38b6-28f0-4bc5-93ed-057de333e064-kube-api-access-m2jxr" (OuterVolumeSpecName: "kube-api-access-m2jxr") pod "ec4b38b6-28f0-4bc5-93ed-057de333e064" (UID: "ec4b38b6-28f0-4bc5-93ed-057de333e064"). InnerVolumeSpecName "kube-api-access-m2jxr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.657368 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ec4b38b6-28f0-4bc5-93ed-057de333e064" (UID: "ec4b38b6-28f0-4bc5-93ed-057de333e064"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.674268 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.674300 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec4b38b6-28f0-4bc5-93ed-057de333e064-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.674311 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2jxr\" (UniqueName: \"kubernetes.io/projected/ec4b38b6-28f0-4bc5-93ed-057de333e064-kube-api-access-m2jxr\") on node \"crc\" DevicePath \"\"" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.952169 4842 generic.go:334] "Generic (PLEG): container finished" podID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerID="ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de" exitCode=0 Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.952231 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dhq59" event={"ID":"ec4b38b6-28f0-4bc5-93ed-057de333e064","Type":"ContainerDied","Data":"ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de"} Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.952269 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dhq59" event={"ID":"ec4b38b6-28f0-4bc5-93ed-057de333e064","Type":"ContainerDied","Data":"6a63e3fb4061d1929c7ea48086793192498a21c288266bb15ec543c2fac896a5"} Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.952294 4842 scope.go:117] "RemoveContainer" containerID="ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.952290 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dhq59" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.975867 4842 scope.go:117] "RemoveContainer" containerID="4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d" Nov 11 14:04:24 crc kubenswrapper[4842]: I1111 14:04:24.996456 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dhq59"] Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.010808 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dhq59"] Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.017082 4842 scope.go:117] "RemoveContainer" containerID="2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1" Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.050260 4842 scope.go:117] "RemoveContainer" containerID="ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de" Nov 11 14:04:25 crc kubenswrapper[4842]: E1111 14:04:25.050940 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de\": container with ID starting with ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de not found: ID does not exist" containerID="ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de" Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.050967 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de"} err="failed to get container status \"ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de\": rpc error: code = NotFound desc = could not find container \"ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de\": container with ID starting with ba70e33dce9d95434c29fe7d6c6e2f5c9ca2b67d6a6716c2bf0992b0555d99de not found: ID does not exist" Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.050990 4842 scope.go:117] "RemoveContainer" containerID="4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d" Nov 11 14:04:25 crc kubenswrapper[4842]: E1111 14:04:25.051309 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d\": container with ID starting with 4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d not found: ID does not exist" containerID="4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d" Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.051333 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d"} err="failed to get container status \"4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d\": rpc error: code = NotFound desc = could not find container \"4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d\": container with ID starting with 4681672c23f7516017524ecc3604013a532fcc0d493de79fb34881f831b0710d not found: ID does not exist" Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.051345 4842 scope.go:117] "RemoveContainer" containerID="2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1" Nov 11 14:04:25 crc kubenswrapper[4842]: E1111 14:04:25.051502 4842 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1\": container with ID starting with 2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1 not found: ID does not exist" containerID="2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1" Nov 11 14:04:25 crc kubenswrapper[4842]: I1111 14:04:25.051520 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1"} err="failed to get container status \"2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1\": rpc error: code = NotFound desc = could not find container \"2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1\": container with ID starting with 2213d70bf1a35ff91d2c31a37a6bb8faa408810c71cf600f6e13e3a6836b13b1 not found: ID does not exist" Nov 11 14:04:26 crc kubenswrapper[4842]: I1111 14:04:26.072806 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" path="/var/lib/kubelet/pods/ec4b38b6-28f0-4bc5-93ed-057de333e064/volumes" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.212179 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vmqj8"] Nov 11 14:04:36 crc kubenswrapper[4842]: E1111 14:04:36.213300 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="extract-utilities" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.213319 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="extract-utilities" Nov 11 14:04:36 crc kubenswrapper[4842]: E1111 14:04:36.213345 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="extract-content" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.213354 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="extract-content" Nov 11 14:04:36 crc kubenswrapper[4842]: E1111 14:04:36.213368 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="registry-server" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.213376 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="registry-server" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.213630 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec4b38b6-28f0-4bc5-93ed-057de333e064" containerName="registry-server" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.215675 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.224869 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vmqj8"] Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.265325 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-utilities\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.265424 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmqbw\" (UniqueName: \"kubernetes.io/projected/3ae1968c-b310-4a79-887d-e9a3b85bc96e-kube-api-access-dmqbw\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.265475 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-catalog-content\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.367326 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-utilities\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.367459 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmqbw\" (UniqueName: \"kubernetes.io/projected/3ae1968c-b310-4a79-887d-e9a3b85bc96e-kube-api-access-dmqbw\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.367511 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-catalog-content\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.368054 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-catalog-content\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.368352 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-utilities\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.408666 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dmqbw\" (UniqueName: \"kubernetes.io/projected/3ae1968c-b310-4a79-887d-e9a3b85bc96e-kube-api-access-dmqbw\") pod \"community-operators-vmqj8\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:36 crc kubenswrapper[4842]: I1111 14:04:36.540561 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:37 crc kubenswrapper[4842]: I1111 14:04:37.101474 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vmqj8"] Nov 11 14:04:38 crc kubenswrapper[4842]: I1111 14:04:38.094306 4842 generic.go:334] "Generic (PLEG): container finished" podID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerID="c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af" exitCode=0 Nov 11 14:04:38 crc kubenswrapper[4842]: I1111 14:04:38.094442 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vmqj8" event={"ID":"3ae1968c-b310-4a79-887d-e9a3b85bc96e","Type":"ContainerDied","Data":"c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af"} Nov 11 14:04:38 crc kubenswrapper[4842]: I1111 14:04:38.094624 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vmqj8" event={"ID":"3ae1968c-b310-4a79-887d-e9a3b85bc96e","Type":"ContainerStarted","Data":"f32dc986406d7c89809238b937b6c35d63cae18406a29e26acebfdb624794c07"} Nov 11 14:04:39 crc kubenswrapper[4842]: I1111 14:04:39.110862 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vmqj8" event={"ID":"3ae1968c-b310-4a79-887d-e9a3b85bc96e","Type":"ContainerStarted","Data":"aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673"} Nov 11 14:04:40 crc kubenswrapper[4842]: I1111 14:04:40.128031 4842 generic.go:334] "Generic (PLEG): container finished" podID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerID="aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673" exitCode=0 Nov 11 14:04:40 crc kubenswrapper[4842]: I1111 14:04:40.128145 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vmqj8" event={"ID":"3ae1968c-b310-4a79-887d-e9a3b85bc96e","Type":"ContainerDied","Data":"aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673"} Nov 11 14:04:41 crc kubenswrapper[4842]: I1111 14:04:41.185786 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vmqj8" event={"ID":"3ae1968c-b310-4a79-887d-e9a3b85bc96e","Type":"ContainerStarted","Data":"1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87"} Nov 11 14:04:41 crc kubenswrapper[4842]: I1111 14:04:41.209310 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vmqj8" podStartSLOduration=2.768631495 podStartE2EDuration="5.209287479s" podCreationTimestamp="2025-11-11 14:04:36 +0000 UTC" firstStartedPulling="2025-11-11 14:04:38.097896437 +0000 UTC m=+2088.758186056" lastFinishedPulling="2025-11-11 14:04:40.538552421 +0000 UTC m=+2091.198842040" observedRunningTime="2025-11-11 14:04:41.208153204 +0000 UTC m=+2091.868442833" watchObservedRunningTime="2025-11-11 14:04:41.209287479 +0000 UTC m=+2091.869577088" Nov 11 14:04:46 crc kubenswrapper[4842]: I1111 14:04:46.540739 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:46 crc kubenswrapper[4842]: I1111 14:04:46.541262 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:46 crc kubenswrapper[4842]: I1111 14:04:46.590557 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:47 crc kubenswrapper[4842]: I1111 14:04:47.312162 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:47 crc kubenswrapper[4842]: I1111 14:04:47.360768 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vmqj8"] Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.283053 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vmqj8" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="registry-server" containerID="cri-o://1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87" gracePeriod=2 Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.773683 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.829333 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-utilities\") pod \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.829462 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmqbw\" (UniqueName: \"kubernetes.io/projected/3ae1968c-b310-4a79-887d-e9a3b85bc96e-kube-api-access-dmqbw\") pod \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.829527 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-catalog-content\") pod \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\" (UID: \"3ae1968c-b310-4a79-887d-e9a3b85bc96e\") " Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.832051 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-utilities" (OuterVolumeSpecName: "utilities") pod "3ae1968c-b310-4a79-887d-e9a3b85bc96e" (UID: "3ae1968c-b310-4a79-887d-e9a3b85bc96e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.835567 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ae1968c-b310-4a79-887d-e9a3b85bc96e-kube-api-access-dmqbw" (OuterVolumeSpecName: "kube-api-access-dmqbw") pod "3ae1968c-b310-4a79-887d-e9a3b85bc96e" (UID: "3ae1968c-b310-4a79-887d-e9a3b85bc96e"). InnerVolumeSpecName "kube-api-access-dmqbw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.931640 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:04:49 crc kubenswrapper[4842]: I1111 14:04:49.931678 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmqbw\" (UniqueName: \"kubernetes.io/projected/3ae1968c-b310-4a79-887d-e9a3b85bc96e-kube-api-access-dmqbw\") on node \"crc\" DevicePath \"\"" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.297204 4842 generic.go:334] "Generic (PLEG): container finished" podID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerID="1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87" exitCode=0 Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.297270 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vmqj8" event={"ID":"3ae1968c-b310-4a79-887d-e9a3b85bc96e","Type":"ContainerDied","Data":"1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87"} Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.297321 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vmqj8" event={"ID":"3ae1968c-b310-4a79-887d-e9a3b85bc96e","Type":"ContainerDied","Data":"f32dc986406d7c89809238b937b6c35d63cae18406a29e26acebfdb624794c07"} Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.297324 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vmqj8" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.297379 4842 scope.go:117] "RemoveContainer" containerID="1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.327500 4842 scope.go:117] "RemoveContainer" containerID="aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.352487 4842 scope.go:117] "RemoveContainer" containerID="c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.401544 4842 scope.go:117] "RemoveContainer" containerID="1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87" Nov 11 14:04:50 crc kubenswrapper[4842]: E1111 14:04:50.401944 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87\": container with ID starting with 1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87 not found: ID does not exist" containerID="1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.401985 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87"} err="failed to get container status \"1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87\": rpc error: code = NotFound desc = could not find container \"1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87\": container with ID starting with 1476cae6cb3c2df4d2b7f335aeff1c104419599783cdd470de81d729f45eab87 not found: ID does not exist" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.402014 4842 scope.go:117] 
"RemoveContainer" containerID="aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673" Nov 11 14:04:50 crc kubenswrapper[4842]: E1111 14:04:50.402437 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673\": container with ID starting with aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673 not found: ID does not exist" containerID="aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.402461 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673"} err="failed to get container status \"aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673\": rpc error: code = NotFound desc = could not find container \"aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673\": container with ID starting with aa4271e293cd28a4b51b5fcd1ff77914ebf384c8b7df393f4e028f709a624673 not found: ID does not exist" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.402474 4842 scope.go:117] "RemoveContainer" containerID="c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af" Nov 11 14:04:50 crc kubenswrapper[4842]: E1111 14:04:50.402755 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af\": container with ID starting with c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af not found: ID does not exist" containerID="c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.402801 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af"} err="failed to get container status \"c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af\": rpc error: code = NotFound desc = could not find container \"c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af\": container with ID starting with c2148112e370842bfe7d6b66049358745bfc219a4610b6c7c92f92a2937705af not found: ID does not exist" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.468279 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ae1968c-b310-4a79-887d-e9a3b85bc96e" (UID: "3ae1968c-b310-4a79-887d-e9a3b85bc96e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.545083 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ae1968c-b310-4a79-887d-e9a3b85bc96e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.635684 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vmqj8"] Nov 11 14:04:50 crc kubenswrapper[4842]: I1111 14:04:50.645973 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vmqj8"] Nov 11 14:04:52 crc kubenswrapper[4842]: I1111 14:04:52.071973 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" path="/var/lib/kubelet/pods/3ae1968c-b310-4a79-887d-e9a3b85bc96e/volumes" Nov 11 14:04:58 crc kubenswrapper[4842]: I1111 14:04:58.576066 4842 scope.go:117] "RemoveContainer" containerID="0791a9873bb23940d7de46dfd60791c6111bea6e8d77c9b54374bd1415986605" Nov 11 14:04:58 crc kubenswrapper[4842]: I1111 14:04:58.598841 4842 scope.go:117] "RemoveContainer" containerID="97b7851488cc4f30c931f383db72bd8218d550c4569782d8657375ff792d9efa" Nov 11 14:04:58 crc kubenswrapper[4842]: I1111 14:04:58.625798 4842 scope.go:117] "RemoveContainer" containerID="09daa28ebfcb544bf56310fe6618559cc2a117ef31ae97db9f16bbf3ffab473f" Nov 11 14:04:58 crc kubenswrapper[4842]: I1111 14:04:58.672523 4842 scope.go:117] "RemoveContainer" containerID="4ddd1acfef2d5a921400f75f7739e8971fe70c77e8c38202290e522490071115" Nov 11 14:05:14 crc kubenswrapper[4842]: I1111 14:05:14.960944 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:05:14 crc kubenswrapper[4842]: I1111 14:05:14.961535 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:05:44 crc kubenswrapper[4842]: I1111 14:05:44.961409 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:05:44 crc kubenswrapper[4842]: I1111 14:05:44.961991 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:05:53 crc kubenswrapper[4842]: I1111 14:05:53.854578 4842 generic.go:334] "Generic (PLEG): container finished" podID="cbeea580-daef-4e97-898b-c194a52a4e97" containerID="4417c892eb2a419d2cfc502280f6d9142363c6a9c58d6642f9638b716596b8a1" exitCode=0 Nov 11 14:05:53 crc kubenswrapper[4842]: I1111 14:05:53.854694 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" event={"ID":"cbeea580-daef-4e97-898b-c194a52a4e97","Type":"ContainerDied","Data":"4417c892eb2a419d2cfc502280f6d9142363c6a9c58d6642f9638b716596b8a1"} Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.251104 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.402975 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-ssh-key\") pod \"cbeea580-daef-4e97-898b-c194a52a4e97\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.403031 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-bootstrap-combined-ca-bundle\") pod \"cbeea580-daef-4e97-898b-c194a52a4e97\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.403212 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m68xv\" (UniqueName: \"kubernetes.io/projected/cbeea580-daef-4e97-898b-c194a52a4e97-kube-api-access-m68xv\") pod \"cbeea580-daef-4e97-898b-c194a52a4e97\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.403286 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-inventory\") pod \"cbeea580-daef-4e97-898b-c194a52a4e97\" (UID: \"cbeea580-daef-4e97-898b-c194a52a4e97\") " Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.409692 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbeea580-daef-4e97-898b-c194a52a4e97-kube-api-access-m68xv" (OuterVolumeSpecName: "kube-api-access-m68xv") pod "cbeea580-daef-4e97-898b-c194a52a4e97" (UID: "cbeea580-daef-4e97-898b-c194a52a4e97"). InnerVolumeSpecName "kube-api-access-m68xv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.410895 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "cbeea580-daef-4e97-898b-c194a52a4e97" (UID: "cbeea580-daef-4e97-898b-c194a52a4e97"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.434960 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cbeea580-daef-4e97-898b-c194a52a4e97" (UID: "cbeea580-daef-4e97-898b-c194a52a4e97"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.437012 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-inventory" (OuterVolumeSpecName: "inventory") pod "cbeea580-daef-4e97-898b-c194a52a4e97" (UID: "cbeea580-daef-4e97-898b-c194a52a4e97"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.505971 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m68xv\" (UniqueName: \"kubernetes.io/projected/cbeea580-daef-4e97-898b-c194a52a4e97-kube-api-access-m68xv\") on node \"crc\" DevicePath \"\"" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.506001 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.506010 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.506018 4842 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbeea580-daef-4e97-898b-c194a52a4e97-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.874910 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" event={"ID":"cbeea580-daef-4e97-898b-c194a52a4e97","Type":"ContainerDied","Data":"decefdcb3197f149ed8ca0f78362a75cbad84cf4ea190f6b145d41d4a7e049b5"} Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.874956 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="decefdcb3197f149ed8ca0f78362a75cbad84cf4ea190f6b145d41d4a7e049b5" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.875026 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.958877 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg"] Nov 11 14:05:55 crc kubenswrapper[4842]: E1111 14:05:55.959525 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="extract-utilities" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.959556 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="extract-utilities" Nov 11 14:05:55 crc kubenswrapper[4842]: E1111 14:05:55.959592 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="extract-content" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.959602 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="extract-content" Nov 11 14:05:55 crc kubenswrapper[4842]: E1111 14:05:55.959622 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="registry-server" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.959630 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="registry-server" Nov 11 14:05:55 crc kubenswrapper[4842]: E1111 14:05:55.959654 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbeea580-daef-4e97-898b-c194a52a4e97" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.959673 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbeea580-daef-4e97-898b-c194a52a4e97" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.959979 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ae1968c-b310-4a79-887d-e9a3b85bc96e" containerName="registry-server" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.960014 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbeea580-daef-4e97-898b-c194a52a4e97" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.960938 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.965467 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.965478 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.965584 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.965669 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:05:55 crc kubenswrapper[4842]: I1111 14:05:55.970180 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg"] Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.118699 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gh8rr\" (UniqueName: \"kubernetes.io/projected/77d635ff-fb62-482a-b81e-18a8e371d404-kube-api-access-gh8rr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.118750 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.118790 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.221776 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh8rr\" (UniqueName: \"kubernetes.io/projected/77d635ff-fb62-482a-b81e-18a8e371d404-kube-api-access-gh8rr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.221877 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.221956 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.225889 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.227053 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.238570 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gh8rr\" (UniqueName: \"kubernetes.io/projected/77d635ff-fb62-482a-b81e-18a8e371d404-kube-api-access-gh8rr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wfksg\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.281351 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.781028 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg"] Nov 11 14:05:56 crc kubenswrapper[4842]: I1111 14:05:56.886620 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" event={"ID":"77d635ff-fb62-482a-b81e-18a8e371d404","Type":"ContainerStarted","Data":"4097b96665e556c5080717933a099efdff0b17ef18021d0a54ae8bc3c12cf327"} Nov 11 14:05:57 crc kubenswrapper[4842]: I1111 14:05:57.896476 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" event={"ID":"77d635ff-fb62-482a-b81e-18a8e371d404","Type":"ContainerStarted","Data":"84e3b9a939d67cf46edf9184b1d8f345ff86003e5438787d01f20cb02424d426"} Nov 11 14:05:57 crc kubenswrapper[4842]: I1111 14:05:57.918214 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" podStartSLOduration=2.513819577 podStartE2EDuration="2.918197203s" podCreationTimestamp="2025-11-11 14:05:55 +0000 UTC" firstStartedPulling="2025-11-11 14:05:56.787734538 +0000 UTC m=+2167.448024157" lastFinishedPulling="2025-11-11 14:05:57.192112154 +0000 UTC m=+2167.852401783" observedRunningTime="2025-11-11 14:05:57.914293811 +0000 UTC m=+2168.574583430" watchObservedRunningTime="2025-11-11 14:05:57.918197203 +0000 UTC m=+2168.578486822" Nov 11 14:06:14 crc kubenswrapper[4842]: I1111 14:06:14.961669 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" start-of-body= Nov 11 14:06:14 crc kubenswrapper[4842]: I1111 14:06:14.962356 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:06:14 crc kubenswrapper[4842]: I1111 14:06:14.962412 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:06:14 crc kubenswrapper[4842]: I1111 14:06:14.963270 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"368d16971bd4982dddf56e9a7531c321d52274d85215ec7740b6521007561a70"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:06:14 crc kubenswrapper[4842]: I1111 14:06:14.963341 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://368d16971bd4982dddf56e9a7531c321d52274d85215ec7740b6521007561a70" gracePeriod=600 Nov 11 14:06:16 crc kubenswrapper[4842]: I1111 14:06:16.095526 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="368d16971bd4982dddf56e9a7531c321d52274d85215ec7740b6521007561a70" exitCode=0 Nov 11 14:06:16 crc kubenswrapper[4842]: I1111 14:06:16.095589 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"368d16971bd4982dddf56e9a7531c321d52274d85215ec7740b6521007561a70"} Nov 11 14:06:16 crc kubenswrapper[4842]: I1111 14:06:16.096180 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde"} Nov 11 14:06:16 crc kubenswrapper[4842]: I1111 14:06:16.096210 4842 scope.go:117] "RemoveContainer" containerID="5ececc87363ad110dbbc9dcc6d46b0040abc1ddf39d701de251d506bdcf8e06d" Nov 11 14:06:23 crc kubenswrapper[4842]: I1111 14:06:23.041595 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-kgfbg"] Nov 11 14:06:23 crc kubenswrapper[4842]: I1111 14:06:23.051595 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-kgfbg"] Nov 11 14:06:24 crc kubenswrapper[4842]: I1111 14:06:24.073619 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6b01f49-6afc-4a28-8564-46a9cd65db71" path="/var/lib/kubelet/pods/c6b01f49-6afc-4a28-8564-46a9cd65db71/volumes" Nov 11 14:06:36 crc kubenswrapper[4842]: I1111 14:06:36.036263 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-k2gmm"] Nov 11 14:06:36 crc kubenswrapper[4842]: I1111 14:06:36.046943 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-bkbt8"] Nov 11 14:06:36 crc kubenswrapper[4842]: I1111 14:06:36.054135 4842 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/watcher-53b5-account-create-pptc8"] Nov 11 14:06:36 crc kubenswrapper[4842]: I1111 14:06:36.072436 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-k2gmm"] Nov 11 14:06:36 crc kubenswrapper[4842]: I1111 14:06:36.072707 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-53b5-account-create-pptc8"] Nov 11 14:06:36 crc kubenswrapper[4842]: I1111 14:06:36.078032 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-bkbt8"] Nov 11 14:06:38 crc kubenswrapper[4842]: I1111 14:06:38.070866 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="414c83eb-da9d-454d-bf91-d577ca5b195b" path="/var/lib/kubelet/pods/414c83eb-da9d-454d-bf91-d577ca5b195b/volumes" Nov 11 14:06:38 crc kubenswrapper[4842]: I1111 14:06:38.072167 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67ae8176-e61d-426c-bb60-286f00de14e3" path="/var/lib/kubelet/pods/67ae8176-e61d-426c-bb60-286f00de14e3/volumes" Nov 11 14:06:38 crc kubenswrapper[4842]: I1111 14:06:38.073221 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d57c4421-985b-449d-a007-c05c9ab3e434" path="/var/lib/kubelet/pods/d57c4421-985b-449d-a007-c05c9ab3e434/volumes" Nov 11 14:06:41 crc kubenswrapper[4842]: I1111 14:06:41.067218 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-eda9-account-create-jdqqc"] Nov 11 14:06:41 crc kubenswrapper[4842]: I1111 14:06:41.122566 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-eda9-account-create-jdqqc"] Nov 11 14:06:42 crc kubenswrapper[4842]: I1111 14:06:42.069987 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4de90b2f-a1b4-4fab-ab4a-433470a522fb" path="/var/lib/kubelet/pods/4de90b2f-a1b4-4fab-ab4a-433470a522fb/volumes" Nov 11 14:06:51 crc kubenswrapper[4842]: I1111 14:06:51.026961 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-da18-account-create-npt7z"] Nov 11 14:06:51 crc kubenswrapper[4842]: I1111 14:06:51.041320 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-da18-account-create-npt7z"] Nov 11 14:06:52 crc kubenswrapper[4842]: I1111 14:06:52.070907 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ead23b7d-4069-406c-ad8f-a23027ccedd0" path="/var/lib/kubelet/pods/ead23b7d-4069-406c-ad8f-a23027ccedd0/volumes" Nov 11 14:06:58 crc kubenswrapper[4842]: I1111 14:06:58.796660 4842 scope.go:117] "RemoveContainer" containerID="a0d1582671902c4bdb1bad9135e8a03c9ae4727bb4dbcbea868cc7db9539de05" Nov 11 14:06:58 crc kubenswrapper[4842]: I1111 14:06:58.834030 4842 scope.go:117] "RemoveContainer" containerID="a8787af09f69c3034752d577148fe07f5ee677c9a5035ea073467905f5ad6987" Nov 11 14:06:58 crc kubenswrapper[4842]: I1111 14:06:58.875972 4842 scope.go:117] "RemoveContainer" containerID="c5312575cee0d77bc14f1a973e4b6baa0a78171022aa2004ba3f3b1ed8ce2a49" Nov 11 14:06:58 crc kubenswrapper[4842]: I1111 14:06:58.924678 4842 scope.go:117] "RemoveContainer" containerID="6a63dafd393be5d16a7041b67b38e5c7b0e5d5d211e92e7731d2ef8683ad8fff" Nov 11 14:06:58 crc kubenswrapper[4842]: I1111 14:06:58.960900 4842 scope.go:117] "RemoveContainer" containerID="66077263a1809dddd2813736e93559206d5f18b79f674e07e4d6c79121a2b3e9" Nov 11 14:06:59 crc kubenswrapper[4842]: I1111 14:06:59.006444 4842 scope.go:117] "RemoveContainer" 
containerID="fff140870778287630c5082bed1b692fe99fec4a4c250e3ac13bb0e7e66fa9f7" Nov 11 14:07:10 crc kubenswrapper[4842]: I1111 14:07:10.047329 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-9wc72"] Nov 11 14:07:10 crc kubenswrapper[4842]: I1111 14:07:10.075575 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-fnhpk"] Nov 11 14:07:10 crc kubenswrapper[4842]: I1111 14:07:10.075619 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-9wc72"] Nov 11 14:07:10 crc kubenswrapper[4842]: I1111 14:07:10.081672 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-fnhpk"] Nov 11 14:07:12 crc kubenswrapper[4842]: I1111 14:07:12.070269 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3302c38d-b1ae-4032-9ede-f26e1de76fc8" path="/var/lib/kubelet/pods/3302c38d-b1ae-4032-9ede-f26e1de76fc8/volumes" Nov 11 14:07:12 crc kubenswrapper[4842]: I1111 14:07:12.072442 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecc207bf-4706-4b38-9695-ab6ca646eac7" path="/var/lib/kubelet/pods/ecc207bf-4706-4b38-9695-ab6ca646eac7/volumes" Nov 11 14:07:13 crc kubenswrapper[4842]: I1111 14:07:13.029589 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5cxmb"] Nov 11 14:07:13 crc kubenswrapper[4842]: I1111 14:07:13.044017 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-qsmk8"] Nov 11 14:07:13 crc kubenswrapper[4842]: I1111 14:07:13.054917 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5cxmb"] Nov 11 14:07:13 crc kubenswrapper[4842]: I1111 14:07:13.063020 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-qsmk8"] Nov 11 14:07:14 crc kubenswrapper[4842]: I1111 14:07:14.072050 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36bca97a-f661-4ba9-8ed0-0a0f38a2f64d" path="/var/lib/kubelet/pods/36bca97a-f661-4ba9-8ed0-0a0f38a2f64d/volumes" Nov 11 14:07:14 crc kubenswrapper[4842]: I1111 14:07:14.073459 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="765495ea-98a3-43ba-9a52-9cb05487cbb5" path="/var/lib/kubelet/pods/765495ea-98a3-43ba-9a52-9cb05487cbb5/volumes" Nov 11 14:07:16 crc kubenswrapper[4842]: I1111 14:07:16.029250 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-7nbtt"] Nov 11 14:07:16 crc kubenswrapper[4842]: I1111 14:07:16.037823 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-7nbtt"] Nov 11 14:07:16 crc kubenswrapper[4842]: I1111 14:07:16.098557 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b592cf0-e1b9-4e74-8018-f244f2fc25b1" path="/var/lib/kubelet/pods/6b592cf0-e1b9-4e74-8018-f244f2fc25b1/volumes" Nov 11 14:07:20 crc kubenswrapper[4842]: I1111 14:07:20.027873 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-ed65-account-create-h4xrz"] Nov 11 14:07:20 crc kubenswrapper[4842]: I1111 14:07:20.035818 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-ed65-account-create-h4xrz"] Nov 11 14:07:20 crc kubenswrapper[4842]: I1111 14:07:20.069722 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b25a8db-967f-4a25-8daa-e7734831d0c4" path="/var/lib/kubelet/pods/4b25a8db-967f-4a25-8daa-e7734831d0c4/volumes" Nov 11 14:07:33 crc kubenswrapper[4842]: I1111 
14:07:33.028090 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-lcspd"] Nov 11 14:07:33 crc kubenswrapper[4842]: I1111 14:07:33.045130 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-lcspd"] Nov 11 14:07:33 crc kubenswrapper[4842]: I1111 14:07:33.833898 4842 generic.go:334] "Generic (PLEG): container finished" podID="77d635ff-fb62-482a-b81e-18a8e371d404" containerID="84e3b9a939d67cf46edf9184b1d8f345ff86003e5438787d01f20cb02424d426" exitCode=0 Nov 11 14:07:33 crc kubenswrapper[4842]: I1111 14:07:33.833946 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" event={"ID":"77d635ff-fb62-482a-b81e-18a8e371d404","Type":"ContainerDied","Data":"84e3b9a939d67cf46edf9184b1d8f345ff86003e5438787d01f20cb02424d426"} Nov 11 14:07:34 crc kubenswrapper[4842]: I1111 14:07:34.076732 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b467d4be-8e0e-43a9-b2a7-51cc22c11e25" path="/var/lib/kubelet/pods/b467d4be-8e0e-43a9-b2a7-51cc22c11e25/volumes" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.235089 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.375180 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-ssh-key\") pod \"77d635ff-fb62-482a-b81e-18a8e371d404\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.375483 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-inventory\") pod \"77d635ff-fb62-482a-b81e-18a8e371d404\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.375735 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gh8rr\" (UniqueName: \"kubernetes.io/projected/77d635ff-fb62-482a-b81e-18a8e371d404-kube-api-access-gh8rr\") pod \"77d635ff-fb62-482a-b81e-18a8e371d404\" (UID: \"77d635ff-fb62-482a-b81e-18a8e371d404\") " Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.380987 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d635ff-fb62-482a-b81e-18a8e371d404-kube-api-access-gh8rr" (OuterVolumeSpecName: "kube-api-access-gh8rr") pod "77d635ff-fb62-482a-b81e-18a8e371d404" (UID: "77d635ff-fb62-482a-b81e-18a8e371d404"). InnerVolumeSpecName "kube-api-access-gh8rr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.404601 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "77d635ff-fb62-482a-b81e-18a8e371d404" (UID: "77d635ff-fb62-482a-b81e-18a8e371d404"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.406504 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-inventory" (OuterVolumeSpecName: "inventory") pod "77d635ff-fb62-482a-b81e-18a8e371d404" (UID: "77d635ff-fb62-482a-b81e-18a8e371d404"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.479200 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gh8rr\" (UniqueName: \"kubernetes.io/projected/77d635ff-fb62-482a-b81e-18a8e371d404-kube-api-access-gh8rr\") on node \"crc\" DevicePath \"\"" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.479235 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.479244 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d635ff-fb62-482a-b81e-18a8e371d404-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.857953 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" event={"ID":"77d635ff-fb62-482a-b81e-18a8e371d404","Type":"ContainerDied","Data":"4097b96665e556c5080717933a099efdff0b17ef18021d0a54ae8bc3c12cf327"} Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.858004 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4097b96665e556c5080717933a099efdff0b17ef18021d0a54ae8bc3c12cf327" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.858062 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wfksg" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.943127 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62"] Nov 11 14:07:35 crc kubenswrapper[4842]: E1111 14:07:35.943712 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d635ff-fb62-482a-b81e-18a8e371d404" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.943732 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d635ff-fb62-482a-b81e-18a8e371d404" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.943979 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d635ff-fb62-482a-b81e-18a8e371d404" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.944884 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.948177 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.948268 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.949039 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.949183 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:07:35 crc kubenswrapper[4842]: I1111 14:07:35.964032 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62"] Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.090291 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g64lx\" (UniqueName: \"kubernetes.io/projected/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-kube-api-access-g64lx\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.090357 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.090483 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.192666 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g64lx\" (UniqueName: \"kubernetes.io/projected/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-kube-api-access-g64lx\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.192908 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.193018 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-inventory\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.197752 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.197867 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.211468 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g64lx\" (UniqueName: \"kubernetes.io/projected/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-kube-api-access-g64lx\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-79s62\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.270587 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.774782 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62"] Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.794406 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:07:36 crc kubenswrapper[4842]: I1111 14:07:36.868932 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" event={"ID":"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a","Type":"ContainerStarted","Data":"8c775f5517858e71b0b6356075b7cd4cd018fa5411f005e5722e3bd7929bed73"} Nov 11 14:07:37 crc kubenswrapper[4842]: I1111 14:07:37.884557 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" event={"ID":"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a","Type":"ContainerStarted","Data":"6c8c87e03863a43a309591ffeddbe25e658ab1e7eb8730eb1814fa74508f8a0f"} Nov 11 14:07:37 crc kubenswrapper[4842]: I1111 14:07:37.908019 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" podStartSLOduration=2.268104467 podStartE2EDuration="2.90799223s" podCreationTimestamp="2025-11-11 14:07:35 +0000 UTC" firstStartedPulling="2025-11-11 14:07:36.794038597 +0000 UTC m=+2267.454328216" lastFinishedPulling="2025-11-11 14:07:37.43392636 +0000 UTC m=+2268.094215979" observedRunningTime="2025-11-11 14:07:37.907179564 +0000 UTC m=+2268.567469223" watchObservedRunningTime="2025-11-11 14:07:37.90799223 +0000 UTC m=+2268.568281849" Nov 11 14:07:44 crc kubenswrapper[4842]: I1111 14:07:44.044439 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/neutron-32ac-account-create-dtmwr"] Nov 11 14:07:44 crc kubenswrapper[4842]: I1111 14:07:44.054030 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-670a-account-create-dtqhk"] Nov 11 14:07:44 crc kubenswrapper[4842]: I1111 14:07:44.071267 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-798f-account-create-j4bvf"] Nov 11 14:07:44 crc kubenswrapper[4842]: I1111 14:07:44.083947 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-670a-account-create-dtqhk"] Nov 11 14:07:44 crc kubenswrapper[4842]: I1111 14:07:44.099703 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-32ac-account-create-dtmwr"] Nov 11 14:07:44 crc kubenswrapper[4842]: I1111 14:07:44.108884 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-798f-account-create-j4bvf"] Nov 11 14:07:46 crc kubenswrapper[4842]: I1111 14:07:46.032513 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-547zg"] Nov 11 14:07:46 crc kubenswrapper[4842]: I1111 14:07:46.040780 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-547zg"] Nov 11 14:07:46 crc kubenswrapper[4842]: I1111 14:07:46.071435 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fcd2f4b-fd39-425d-a3c8-382a5020d38b" path="/var/lib/kubelet/pods/1fcd2f4b-fd39-425d-a3c8-382a5020d38b/volumes" Nov 11 14:07:46 crc kubenswrapper[4842]: I1111 14:07:46.072012 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48dceb55-6e5e-400b-a9fa-026a0c06bd41" path="/var/lib/kubelet/pods/48dceb55-6e5e-400b-a9fa-026a0c06bd41/volumes" Nov 11 14:07:46 crc kubenswrapper[4842]: I1111 14:07:46.072522 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d1e700d-ece9-4398-9bfe-d36b8fe07607" path="/var/lib/kubelet/pods/5d1e700d-ece9-4398-9bfe-d36b8fe07607/volumes" Nov 11 14:07:46 crc kubenswrapper[4842]: I1111 14:07:46.073145 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a47141a6-6da7-4e16-b6c5-299f5709caa6" path="/var/lib/kubelet/pods/a47141a6-6da7-4e16-b6c5-299f5709caa6/volumes" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.165560 4842 scope.go:117] "RemoveContainer" containerID="7a880cb00b085135c48fa1646e350aef6a9601c1aaa7e47e1dc2e4c580fabeaf" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.192077 4842 scope.go:117] "RemoveContainer" containerID="29a09d93f9292afd2540589ae3d2824ce0b44982621c84543a389a652dd954ab" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.243147 4842 scope.go:117] "RemoveContainer" containerID="39f2c30ec1a9d41eb0871cc2ef9399a52b8dcbeb6a84f6d9b45e364e2824824f" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.292116 4842 scope.go:117] "RemoveContainer" containerID="230ea59936243c04daedf5ca034427af9fa2f2ac7f4bc6d8a5abf11e59f059d1" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.343935 4842 scope.go:117] "RemoveContainer" containerID="97359c39304da59018ff5d5dac439fbfb1c093b7f3916d3caa72a2ca01e10f54" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.407346 4842 scope.go:117] "RemoveContainer" containerID="0f82b024549ee83a881868bd2c19b000a7cffa515c0eb3408d9f7786124fe2b5" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.474234 4842 scope.go:117] "RemoveContainer" containerID="4deeb75fc13633671f698acc4a711bc669b767ee363af61e8fb67ec940977b9a" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.509042 4842 scope.go:117] 
"RemoveContainer" containerID="d1ee2e5d7242d98238ecc29642dd08f4f110b59935e67f0fe3ec88a5f3029ee4" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.538158 4842 scope.go:117] "RemoveContainer" containerID="faa4e5ef9ae5cc48bc5f78a037df77050cf327e7ebc298de016ab0097c705310" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.568619 4842 scope.go:117] "RemoveContainer" containerID="85e1e626291faa7e959429213b1eb0324e21ef79108218870afe406c92109563" Nov 11 14:07:59 crc kubenswrapper[4842]: I1111 14:07:59.600744 4842 scope.go:117] "RemoveContainer" containerID="1d1c9b5432ee57faf6c2ff49e711da754f1e2505992c9f6c8a6eeb6a03d4771d" Nov 11 14:08:05 crc kubenswrapper[4842]: I1111 14:08:05.038460 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-nqmw2"] Nov 11 14:08:05 crc kubenswrapper[4842]: I1111 14:08:05.047657 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-nqmw2"] Nov 11 14:08:06 crc kubenswrapper[4842]: I1111 14:08:06.078933 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a855b49-006b-47a5-a808-c1c3649473aa" path="/var/lib/kubelet/pods/8a855b49-006b-47a5-a808-c1c3649473aa/volumes" Nov 11 14:08:17 crc kubenswrapper[4842]: I1111 14:08:17.038768 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-7nz95"] Nov 11 14:08:17 crc kubenswrapper[4842]: I1111 14:08:17.048513 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-7nz95"] Nov 11 14:08:18 crc kubenswrapper[4842]: I1111 14:08:18.070198 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae52071f-3664-4aac-8657-3351df5c6fff" path="/var/lib/kubelet/pods/ae52071f-3664-4aac-8657-3351df5c6fff/volumes" Nov 11 14:08:29 crc kubenswrapper[4842]: I1111 14:08:29.030614 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-wnl6v"] Nov 11 14:08:29 crc kubenswrapper[4842]: I1111 14:08:29.042380 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-wnl6v"] Nov 11 14:08:30 crc kubenswrapper[4842]: I1111 14:08:30.085581 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a02fa3d1-7142-425d-b514-0a647bfda1ca" path="/var/lib/kubelet/pods/a02fa3d1-7142-425d-b514-0a647bfda1ca/volumes" Nov 11 14:08:44 crc kubenswrapper[4842]: I1111 14:08:44.960832 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:08:44 crc kubenswrapper[4842]: I1111 14:08:44.961493 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:08:46 crc kubenswrapper[4842]: I1111 14:08:46.050619 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-6xg8p"] Nov 11 14:08:46 crc kubenswrapper[4842]: I1111 14:08:46.084038 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-r24nh"] Nov 11 14:08:46 crc kubenswrapper[4842]: I1111 14:08:46.084121 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-r24nh"] Nov 11 
14:08:46 crc kubenswrapper[4842]: I1111 14:08:46.089355 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-6xg8p"] Nov 11 14:08:48 crc kubenswrapper[4842]: I1111 14:08:48.069380 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69d5134b-7c5b-40d9-bcbd-a1bd368a358d" path="/var/lib/kubelet/pods/69d5134b-7c5b-40d9-bcbd-a1bd368a358d/volumes" Nov 11 14:08:48 crc kubenswrapper[4842]: I1111 14:08:48.070388 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c61f37c1-6c58-4ae1-a127-2238733058b4" path="/var/lib/kubelet/pods/c61f37c1-6c58-4ae1-a127-2238733058b4/volumes" Nov 11 14:08:51 crc kubenswrapper[4842]: I1111 14:08:51.027016 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-dvshs"] Nov 11 14:08:51 crc kubenswrapper[4842]: I1111 14:08:51.040023 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-lqchx"] Nov 11 14:08:51 crc kubenswrapper[4842]: I1111 14:08:51.049927 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-dvshs"] Nov 11 14:08:51 crc kubenswrapper[4842]: I1111 14:08:51.057240 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-lqchx"] Nov 11 14:08:51 crc kubenswrapper[4842]: E1111 14:08:51.296796 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53ccc159_9d7f_41b0_8f5d_bc9521be7f1a.slice/crio-conmon-6c8c87e03863a43a309591ffeddbe25e658ab1e7eb8730eb1814fa74508f8a0f.scope\": RecentStats: unable to find data in memory cache]" Nov 11 14:08:51 crc kubenswrapper[4842]: I1111 14:08:51.565905 4842 generic.go:334] "Generic (PLEG): container finished" podID="53ccc159-9d7f-41b0-8f5d-bc9521be7f1a" containerID="6c8c87e03863a43a309591ffeddbe25e658ab1e7eb8730eb1814fa74508f8a0f" exitCode=0 Nov 11 14:08:51 crc kubenswrapper[4842]: I1111 14:08:51.566681 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" event={"ID":"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a","Type":"ContainerDied","Data":"6c8c87e03863a43a309591ffeddbe25e658ab1e7eb8730eb1814fa74508f8a0f"} Nov 11 14:08:52 crc kubenswrapper[4842]: I1111 14:08:52.029096 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-nwc96"] Nov 11 14:08:52 crc kubenswrapper[4842]: I1111 14:08:52.038676 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-nwc96"] Nov 11 14:08:52 crc kubenswrapper[4842]: I1111 14:08:52.075565 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c9219a3-683a-4aec-b1a5-017efe925201" path="/var/lib/kubelet/pods/5c9219a3-683a-4aec-b1a5-017efe925201/volumes" Nov 11 14:08:52 crc kubenswrapper[4842]: I1111 14:08:52.076283 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e35c9893-2858-44cb-a754-0aae7ca251ef" path="/var/lib/kubelet/pods/e35c9893-2858-44cb-a754-0aae7ca251ef/volumes" Nov 11 14:08:52 crc kubenswrapper[4842]: I1111 14:08:52.077449 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365" path="/var/lib/kubelet/pods/fb2d05fd-327c-4c7e-89a2-8d8fe8e1e365/volumes" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.038554 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.187162 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-inventory\") pod \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.187338 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g64lx\" (UniqueName: \"kubernetes.io/projected/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-kube-api-access-g64lx\") pod \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.187405 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-ssh-key\") pod \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\" (UID: \"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a\") " Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.193571 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-kube-api-access-g64lx" (OuterVolumeSpecName: "kube-api-access-g64lx") pod "53ccc159-9d7f-41b0-8f5d-bc9521be7f1a" (UID: "53ccc159-9d7f-41b0-8f5d-bc9521be7f1a"). InnerVolumeSpecName "kube-api-access-g64lx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.217184 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "53ccc159-9d7f-41b0-8f5d-bc9521be7f1a" (UID: "53ccc159-9d7f-41b0-8f5d-bc9521be7f1a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.217860 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-inventory" (OuterVolumeSpecName: "inventory") pod "53ccc159-9d7f-41b0-8f5d-bc9521be7f1a" (UID: "53ccc159-9d7f-41b0-8f5d-bc9521be7f1a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.289981 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.290019 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.290029 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g64lx\" (UniqueName: \"kubernetes.io/projected/53ccc159-9d7f-41b0-8f5d-bc9521be7f1a-kube-api-access-g64lx\") on node \"crc\" DevicePath \"\"" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.587702 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" event={"ID":"53ccc159-9d7f-41b0-8f5d-bc9521be7f1a","Type":"ContainerDied","Data":"8c775f5517858e71b0b6356075b7cd4cd018fa5411f005e5722e3bd7929bed73"} Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.587739 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c775f5517858e71b0b6356075b7cd4cd018fa5411f005e5722e3bd7929bed73" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.587763 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-79s62" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.677183 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h"] Nov 11 14:08:53 crc kubenswrapper[4842]: E1111 14:08:53.677851 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ccc159-9d7f-41b0-8f5d-bc9521be7f1a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.677878 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ccc159-9d7f-41b0-8f5d-bc9521be7f1a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.678227 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="53ccc159-9d7f-41b0-8f5d-bc9521be7f1a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.679146 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.681539 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.682057 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.682417 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.682668 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.710009 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h"] Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.800467 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jz6sf\" (UniqueName: \"kubernetes.io/projected/b96d38f2-b032-495a-8296-72c06458c86f-kube-api-access-jz6sf\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.800612 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.800811 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.903082 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.903179 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jz6sf\" (UniqueName: \"kubernetes.io/projected/b96d38f2-b032-495a-8296-72c06458c86f-kube-api-access-jz6sf\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.903294 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.906964 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.917649 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:53 crc kubenswrapper[4842]: I1111 14:08:53.921143 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jz6sf\" (UniqueName: \"kubernetes.io/projected/b96d38f2-b032-495a-8296-72c06458c86f-kube-api-access-jz6sf\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bs62h\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:54 crc kubenswrapper[4842]: I1111 14:08:54.005381 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:08:54 crc kubenswrapper[4842]: I1111 14:08:54.497537 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h"] Nov 11 14:08:54 crc kubenswrapper[4842]: I1111 14:08:54.598849 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" event={"ID":"b96d38f2-b032-495a-8296-72c06458c86f","Type":"ContainerStarted","Data":"9c3eb78b5dbe646065b7f2b05bf679682e8b3ce28223cf127765436b85f09c54"} Nov 11 14:08:55 crc kubenswrapper[4842]: I1111 14:08:55.609446 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" event={"ID":"b96d38f2-b032-495a-8296-72c06458c86f","Type":"ContainerStarted","Data":"3752e4a5019f77b13591bc64241830ff68c115f470fc89de2a9ff80afb027037"} Nov 11 14:08:55 crc kubenswrapper[4842]: I1111 14:08:55.633409 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" podStartSLOduration=2.178402483 podStartE2EDuration="2.633384679s" podCreationTimestamp="2025-11-11 14:08:53 +0000 UTC" firstStartedPulling="2025-11-11 14:08:54.505168414 +0000 UTC m=+2345.165458053" lastFinishedPulling="2025-11-11 14:08:54.96015063 +0000 UTC m=+2345.620440249" observedRunningTime="2025-11-11 14:08:55.6258634 +0000 UTC m=+2346.286153019" watchObservedRunningTime="2025-11-11 14:08:55.633384679 +0000 UTC m=+2346.293674298" Nov 11 14:08:59 crc kubenswrapper[4842]: I1111 14:08:59.844993 4842 scope.go:117] "RemoveContainer" containerID="8765db9bdef24aa4b3ad1ae04cc6cb698429f237b11c40b9d8c1c1e343fa2f25" Nov 11 14:08:59 crc kubenswrapper[4842]: I1111 14:08:59.875768 4842 scope.go:117] "RemoveContainer" 
containerID="3adcedbcd834e3507ffafa2601772236fde16cc6021a523303c88ac00e5e9f5f" Nov 11 14:08:59 crc kubenswrapper[4842]: I1111 14:08:59.924888 4842 scope.go:117] "RemoveContainer" containerID="a6d2a9158406282f3f0c02371f9d1ccde982bf8e0fc880379df425ba46ee4246" Nov 11 14:08:59 crc kubenswrapper[4842]: I1111 14:08:59.982310 4842 scope.go:117] "RemoveContainer" containerID="9edeb81704cf367311040a89b729f5e4a6380120be9870cd114757adc14d1d96" Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.053671 4842 scope.go:117] "RemoveContainer" containerID="ef4daab76392c0a6aeee495a68dc96653c8e12008714269f34a383934f5655c6" Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.053793 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f633-account-create-dwqqz"] Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.079315 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f633-account-create-dwqqz"] Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.110752 4842 scope.go:117] "RemoveContainer" containerID="e6f8ae81dfc1be49ad74310f51ce8735a6cffc868610c81cdaf1dfe3180bd5a2" Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.137483 4842 scope.go:117] "RemoveContainer" containerID="d205021a04359c1f6f62dfb28b9d8bdb1acb380a341e2af7013d953af97ab3dc" Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.180957 4842 scope.go:117] "RemoveContainer" containerID="207313ad3ea06d2b48be541f6e02378b43d76295e9964a536c060373dcd8c022" Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.664476 4842 generic.go:334] "Generic (PLEG): container finished" podID="b96d38f2-b032-495a-8296-72c06458c86f" containerID="3752e4a5019f77b13591bc64241830ff68c115f470fc89de2a9ff80afb027037" exitCode=0 Nov 11 14:09:00 crc kubenswrapper[4842]: I1111 14:09:00.664518 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" event={"ID":"b96d38f2-b032-495a-8296-72c06458c86f","Type":"ContainerDied","Data":"3752e4a5019f77b13591bc64241830ff68c115f470fc89de2a9ff80afb027037"} Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.071392 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="773d178a-6ce6-4bed-9faf-42147a7ba279" path="/var/lib/kubelet/pods/773d178a-6ce6-4bed-9faf-42147a7ba279/volumes" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.150340 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.261794 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jz6sf\" (UniqueName: \"kubernetes.io/projected/b96d38f2-b032-495a-8296-72c06458c86f-kube-api-access-jz6sf\") pod \"b96d38f2-b032-495a-8296-72c06458c86f\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.263085 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-inventory\") pod \"b96d38f2-b032-495a-8296-72c06458c86f\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.263248 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-ssh-key\") pod \"b96d38f2-b032-495a-8296-72c06458c86f\" (UID: \"b96d38f2-b032-495a-8296-72c06458c86f\") " Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.267470 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b96d38f2-b032-495a-8296-72c06458c86f-kube-api-access-jz6sf" (OuterVolumeSpecName: "kube-api-access-jz6sf") pod "b96d38f2-b032-495a-8296-72c06458c86f" (UID: "b96d38f2-b032-495a-8296-72c06458c86f"). InnerVolumeSpecName "kube-api-access-jz6sf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.290688 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b96d38f2-b032-495a-8296-72c06458c86f" (UID: "b96d38f2-b032-495a-8296-72c06458c86f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.291073 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-inventory" (OuterVolumeSpecName: "inventory") pod "b96d38f2-b032-495a-8296-72c06458c86f" (UID: "b96d38f2-b032-495a-8296-72c06458c86f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.365698 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.365740 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96d38f2-b032-495a-8296-72c06458c86f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.365752 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jz6sf\" (UniqueName: \"kubernetes.io/projected/b96d38f2-b032-495a-8296-72c06458c86f-kube-api-access-jz6sf\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.683780 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" event={"ID":"b96d38f2-b032-495a-8296-72c06458c86f","Type":"ContainerDied","Data":"9c3eb78b5dbe646065b7f2b05bf679682e8b3ce28223cf127765436b85f09c54"} Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.684091 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c3eb78b5dbe646065b7f2b05bf679682e8b3ce28223cf127765436b85f09c54" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.683843 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bs62h" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.763002 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm"] Nov 11 14:09:02 crc kubenswrapper[4842]: E1111 14:09:02.763560 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96d38f2-b032-495a-8296-72c06458c86f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.763590 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96d38f2-b032-495a-8296-72c06458c86f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.763998 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b96d38f2-b032-495a-8296-72c06458c86f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.768685 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.773947 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.774114 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.776973 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.777162 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.779751 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm"] Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.881880 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w747x\" (UniqueName: \"kubernetes.io/projected/a6455983-1479-4b83-a9ba-2aef71382fc7-kube-api-access-w747x\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.882047 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.882111 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.983453 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w747x\" (UniqueName: \"kubernetes.io/projected/a6455983-1479-4b83-a9ba-2aef71382fc7-kube-api-access-w747x\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.983554 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.983622 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: 
\"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.989233 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.997507 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:02 crc kubenswrapper[4842]: I1111 14:09:02.999657 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w747x\" (UniqueName: \"kubernetes.io/projected/a6455983-1479-4b83-a9ba-2aef71382fc7-kube-api-access-w747x\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-8drsm\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:03 crc kubenswrapper[4842]: I1111 14:09:03.094160 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:03 crc kubenswrapper[4842]: I1111 14:09:03.629009 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm"] Nov 11 14:09:03 crc kubenswrapper[4842]: I1111 14:09:03.691960 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" event={"ID":"a6455983-1479-4b83-a9ba-2aef71382fc7","Type":"ContainerStarted","Data":"ed6419d99722afc8f94d1f9566c26f53b2fc3ecbd9f0702734b61ab50ad1e5dd"} Nov 11 14:09:04 crc kubenswrapper[4842]: I1111 14:09:04.702026 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" event={"ID":"a6455983-1479-4b83-a9ba-2aef71382fc7","Type":"ContainerStarted","Data":"dac8873eaee165263d1c49ad5a865847bb4698607cfbfd0b58d011ffa07c1615"} Nov 11 14:09:04 crc kubenswrapper[4842]: I1111 14:09:04.724252 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" podStartSLOduration=2.232271331 podStartE2EDuration="2.724230456s" podCreationTimestamp="2025-11-11 14:09:02 +0000 UTC" firstStartedPulling="2025-11-11 14:09:03.632903436 +0000 UTC m=+2354.293193055" lastFinishedPulling="2025-11-11 14:09:04.124862561 +0000 UTC m=+2354.785152180" observedRunningTime="2025-11-11 14:09:04.715786559 +0000 UTC m=+2355.376076168" watchObservedRunningTime="2025-11-11 14:09:04.724230456 +0000 UTC m=+2355.384520085" Nov 11 14:09:11 crc kubenswrapper[4842]: I1111 14:09:11.036601 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1ecb-account-create-crxmr"] Nov 11 14:09:11 crc kubenswrapper[4842]: I1111 14:09:11.045132 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-5382-account-create-kkn8k"] Nov 11 14:09:11 crc kubenswrapper[4842]: I1111 14:09:11.053312 4842 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/nova-cell0-1ecb-account-create-crxmr"] Nov 11 14:09:11 crc kubenswrapper[4842]: I1111 14:09:11.060344 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-5382-account-create-kkn8k"] Nov 11 14:09:12 crc kubenswrapper[4842]: I1111 14:09:12.072159 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64d246d4-7202-4b17-9d4e-febbf9bbfff7" path="/var/lib/kubelet/pods/64d246d4-7202-4b17-9d4e-febbf9bbfff7/volumes" Nov 11 14:09:12 crc kubenswrapper[4842]: I1111 14:09:12.073594 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66fea681-2020-41b9-be32-04f0b846e302" path="/var/lib/kubelet/pods/66fea681-2020-41b9-be32-04f0b846e302/volumes" Nov 11 14:09:14 crc kubenswrapper[4842]: I1111 14:09:14.961610 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:09:14 crc kubenswrapper[4842]: I1111 14:09:14.961949 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:09:34 crc kubenswrapper[4842]: I1111 14:09:34.858988 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cqdms"] Nov 11 14:09:34 crc kubenswrapper[4842]: I1111 14:09:34.861511 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:34 crc kubenswrapper[4842]: I1111 14:09:34.880210 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cqdms"] Nov 11 14:09:34 crc kubenswrapper[4842]: I1111 14:09:34.924268 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8zhc\" (UniqueName: \"kubernetes.io/projected/8810a597-e909-4109-9679-d295c567593b-kube-api-access-c8zhc\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:34 crc kubenswrapper[4842]: I1111 14:09:34.924395 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-catalog-content\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:34 crc kubenswrapper[4842]: I1111 14:09:34.924424 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-utilities\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.025848 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-catalog-content\") pod \"redhat-marketplace-cqdms\" 
(UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.026165 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-utilities\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.026339 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8zhc\" (UniqueName: \"kubernetes.io/projected/8810a597-e909-4109-9679-d295c567593b-kube-api-access-c8zhc\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.026384 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-catalog-content\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.026968 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-utilities\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.047459 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8zhc\" (UniqueName: \"kubernetes.io/projected/8810a597-e909-4109-9679-d295c567593b-kube-api-access-c8zhc\") pod \"redhat-marketplace-cqdms\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.184688 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:35 crc kubenswrapper[4842]: I1111 14:09:35.634361 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cqdms"] Nov 11 14:09:36 crc kubenswrapper[4842]: I1111 14:09:36.017003 4842 generic.go:334] "Generic (PLEG): container finished" podID="8810a597-e909-4109-9679-d295c567593b" containerID="c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72" exitCode=0 Nov 11 14:09:36 crc kubenswrapper[4842]: I1111 14:09:36.017209 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cqdms" event={"ID":"8810a597-e909-4109-9679-d295c567593b","Type":"ContainerDied","Data":"c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72"} Nov 11 14:09:36 crc kubenswrapper[4842]: I1111 14:09:36.017363 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cqdms" event={"ID":"8810a597-e909-4109-9679-d295c567593b","Type":"ContainerStarted","Data":"7f4d2d466bfa5eab135d49880692e34f00adaa96196821e5efd59701019d9ae3"} Nov 11 14:09:36 crc kubenswrapper[4842]: I1111 14:09:36.078404 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cdldt"] Nov 11 14:09:36 crc kubenswrapper[4842]: I1111 14:09:36.096217 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cdldt"] Nov 11 14:09:37 crc kubenswrapper[4842]: I1111 14:09:37.026483 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cqdms" event={"ID":"8810a597-e909-4109-9679-d295c567593b","Type":"ContainerStarted","Data":"859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61"} Nov 11 14:09:38 crc kubenswrapper[4842]: I1111 14:09:38.039606 4842 generic.go:334] "Generic (PLEG): container finished" podID="8810a597-e909-4109-9679-d295c567593b" containerID="859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61" exitCode=0 Nov 11 14:09:38 crc kubenswrapper[4842]: I1111 14:09:38.039692 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cqdms" event={"ID":"8810a597-e909-4109-9679-d295c567593b","Type":"ContainerDied","Data":"859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61"} Nov 11 14:09:38 crc kubenswrapper[4842]: I1111 14:09:38.070613 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb167dbe-4958-4dff-8389-4fcd23764a9c" path="/var/lib/kubelet/pods/cb167dbe-4958-4dff-8389-4fcd23764a9c/volumes" Nov 11 14:09:39 crc kubenswrapper[4842]: I1111 14:09:39.058105 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cqdms" event={"ID":"8810a597-e909-4109-9679-d295c567593b","Type":"ContainerStarted","Data":"96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0"} Nov 11 14:09:39 crc kubenswrapper[4842]: I1111 14:09:39.089427 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cqdms" podStartSLOduration=2.54895122 podStartE2EDuration="5.089400888s" podCreationTimestamp="2025-11-11 14:09:34 +0000 UTC" firstStartedPulling="2025-11-11 14:09:36.023262241 +0000 UTC m=+2386.683551860" lastFinishedPulling="2025-11-11 14:09:38.563711909 +0000 UTC m=+2389.224001528" observedRunningTime="2025-11-11 14:09:39.080238779 +0000 UTC m=+2389.740528398" watchObservedRunningTime="2025-11-11 
14:09:39.089400888 +0000 UTC m=+2389.749690517" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.553190 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xhnk5"] Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.556485 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.565284 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xhnk5"] Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.721768 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-catalog-content\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.721855 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49bkg\" (UniqueName: \"kubernetes.io/projected/452effce-f4ea-422c-aef4-f87b7ceb8ef5-kube-api-access-49bkg\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.721992 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-utilities\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.823946 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49bkg\" (UniqueName: \"kubernetes.io/projected/452effce-f4ea-422c-aef4-f87b7ceb8ef5-kube-api-access-49bkg\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.824084 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-utilities\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.824183 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-catalog-content\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.824709 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-utilities\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.824743 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-catalog-content\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.848584 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49bkg\" (UniqueName: \"kubernetes.io/projected/452effce-f4ea-422c-aef4-f87b7ceb8ef5-kube-api-access-49bkg\") pod \"certified-operators-xhnk5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:42 crc kubenswrapper[4842]: I1111 14:09:42.891792 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:43 crc kubenswrapper[4842]: I1111 14:09:43.437645 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xhnk5"] Nov 11 14:09:43 crc kubenswrapper[4842]: W1111 14:09:43.445736 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod452effce_f4ea_422c_aef4_f87b7ceb8ef5.slice/crio-ed623108d8aa57595603fb8f0c1ea4f4d133b3d0e1725024b157f94e45a2b841 WatchSource:0}: Error finding container ed623108d8aa57595603fb8f0c1ea4f4d133b3d0e1725024b157f94e45a2b841: Status 404 returned error can't find the container with id ed623108d8aa57595603fb8f0c1ea4f4d133b3d0e1725024b157f94e45a2b841 Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.142967 4842 generic.go:334] "Generic (PLEG): container finished" podID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerID="3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1" exitCode=0 Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.143072 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xhnk5" event={"ID":"452effce-f4ea-422c-aef4-f87b7ceb8ef5","Type":"ContainerDied","Data":"3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1"} Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.143334 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xhnk5" event={"ID":"452effce-f4ea-422c-aef4-f87b7ceb8ef5","Type":"ContainerStarted","Data":"ed623108d8aa57595603fb8f0c1ea4f4d133b3d0e1725024b157f94e45a2b841"} Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.961285 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.961580 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.961618 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.962464 4842 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:09:44 crc kubenswrapper[4842]: I1111 14:09:44.962530 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" gracePeriod=600 Nov 11 14:09:45 crc kubenswrapper[4842]: E1111 14:09:45.092337 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.161614 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xhnk5" event={"ID":"452effce-f4ea-422c-aef4-f87b7ceb8ef5","Type":"ContainerStarted","Data":"ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64"} Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.166397 4842 generic.go:334] "Generic (PLEG): container finished" podID="a6455983-1479-4b83-a9ba-2aef71382fc7" containerID="dac8873eaee165263d1c49ad5a865847bb4698607cfbfd0b58d011ffa07c1615" exitCode=0 Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.166592 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" event={"ID":"a6455983-1479-4b83-a9ba-2aef71382fc7","Type":"ContainerDied","Data":"dac8873eaee165263d1c49ad5a865847bb4698607cfbfd0b58d011ffa07c1615"} Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.171939 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" exitCode=0 Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.171992 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde"} Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.172044 4842 scope.go:117] "RemoveContainer" containerID="368d16971bd4982dddf56e9a7531c321d52274d85215ec7740b6521007561a70" Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.173431 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:09:45 crc kubenswrapper[4842]: E1111 14:09:45.174479 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 
11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.184800 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.185433 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:45 crc kubenswrapper[4842]: I1111 14:09:45.248926 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.183922 4842 generic.go:334] "Generic (PLEG): container finished" podID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerID="ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64" exitCode=0 Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.183965 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xhnk5" event={"ID":"452effce-f4ea-422c-aef4-f87b7ceb8ef5","Type":"ContainerDied","Data":"ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64"} Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.242681 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.604646 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.727467 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w747x\" (UniqueName: \"kubernetes.io/projected/a6455983-1479-4b83-a9ba-2aef71382fc7-kube-api-access-w747x\") pod \"a6455983-1479-4b83-a9ba-2aef71382fc7\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.727934 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-inventory\") pod \"a6455983-1479-4b83-a9ba-2aef71382fc7\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.728044 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-ssh-key\") pod \"a6455983-1479-4b83-a9ba-2aef71382fc7\" (UID: \"a6455983-1479-4b83-a9ba-2aef71382fc7\") " Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.733935 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6455983-1479-4b83-a9ba-2aef71382fc7-kube-api-access-w747x" (OuterVolumeSpecName: "kube-api-access-w747x") pod "a6455983-1479-4b83-a9ba-2aef71382fc7" (UID: "a6455983-1479-4b83-a9ba-2aef71382fc7"). InnerVolumeSpecName "kube-api-access-w747x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.770266 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a6455983-1479-4b83-a9ba-2aef71382fc7" (UID: "a6455983-1479-4b83-a9ba-2aef71382fc7"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.776993 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-inventory" (OuterVolumeSpecName: "inventory") pod "a6455983-1479-4b83-a9ba-2aef71382fc7" (UID: "a6455983-1479-4b83-a9ba-2aef71382fc7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.831752 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w747x\" (UniqueName: \"kubernetes.io/projected/a6455983-1479-4b83-a9ba-2aef71382fc7-kube-api-access-w747x\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.831794 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:46 crc kubenswrapper[4842]: I1111 14:09:46.831804 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6455983-1479-4b83-a9ba-2aef71382fc7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.194854 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.194863 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-8drsm" event={"ID":"a6455983-1479-4b83-a9ba-2aef71382fc7","Type":"ContainerDied","Data":"ed6419d99722afc8f94d1f9566c26f53b2fc3ecbd9f0702734b61ab50ad1e5dd"} Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.196156 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed6419d99722afc8f94d1f9566c26f53b2fc3ecbd9f0702734b61ab50ad1e5dd" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.197702 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xhnk5" event={"ID":"452effce-f4ea-422c-aef4-f87b7ceb8ef5","Type":"ContainerStarted","Data":"efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea"} Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.238303 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xhnk5" podStartSLOduration=2.579458195 podStartE2EDuration="5.238283262s" podCreationTimestamp="2025-11-11 14:09:42 +0000 UTC" firstStartedPulling="2025-11-11 14:09:44.144592114 +0000 UTC m=+2394.804881733" lastFinishedPulling="2025-11-11 14:09:46.803417181 +0000 UTC m=+2397.463706800" observedRunningTime="2025-11-11 14:09:47.221048008 +0000 UTC m=+2397.881337627" watchObservedRunningTime="2025-11-11 14:09:47.238283262 +0000 UTC m=+2397.898572881" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.292757 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg"] Nov 11 14:09:47 crc kubenswrapper[4842]: E1111 14:09:47.293186 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6455983-1479-4b83-a9ba-2aef71382fc7" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.293204 4842 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a6455983-1479-4b83-a9ba-2aef71382fc7" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.293383 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6455983-1479-4b83-a9ba-2aef71382fc7" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.294055 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.295603 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.295784 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.296603 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.297496 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.321685 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg"] Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.452451 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ws8m\" (UniqueName: \"kubernetes.io/projected/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-kube-api-access-9ws8m\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.452882 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.453026 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.555355 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ws8m\" (UniqueName: \"kubernetes.io/projected/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-kube-api-access-9ws8m\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.555504 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-ssh-key\") pod 
\"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.555578 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.561807 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.562563 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.584892 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ws8m\" (UniqueName: \"kubernetes.io/projected/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-kube-api-access-9ws8m\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-btvfg\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.622303 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:09:47 crc kubenswrapper[4842]: I1111 14:09:47.627386 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cqdms"] Nov 11 14:09:48 crc kubenswrapper[4842]: I1111 14:09:48.155237 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg"] Nov 11 14:09:48 crc kubenswrapper[4842]: W1111 14:09:48.163127 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf94c8fa3_0b93_4cf3_9aae_9feb9bc79273.slice/crio-f8c7a0ff9f05be12c98fcf5568f086eb6bb14fd0080ce1774f27239f78ba796b WatchSource:0}: Error finding container f8c7a0ff9f05be12c98fcf5568f086eb6bb14fd0080ce1774f27239f78ba796b: Status 404 returned error can't find the container with id f8c7a0ff9f05be12c98fcf5568f086eb6bb14fd0080ce1774f27239f78ba796b Nov 11 14:09:48 crc kubenswrapper[4842]: I1111 14:09:48.231354 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" event={"ID":"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273","Type":"ContainerStarted","Data":"f8c7a0ff9f05be12c98fcf5568f086eb6bb14fd0080ce1774f27239f78ba796b"} Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.245241 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" event={"ID":"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273","Type":"ContainerStarted","Data":"fdec77cb18889a965555bd20622d7e245ee3ef8763be5c9d2259f65f5a5eefbf"} Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.245352 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cqdms" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="registry-server" containerID="cri-o://96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0" gracePeriod=2 Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.267790 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" podStartSLOduration=1.828402912 podStartE2EDuration="2.267773747s" podCreationTimestamp="2025-11-11 14:09:47 +0000 UTC" firstStartedPulling="2025-11-11 14:09:48.16518357 +0000 UTC m=+2398.825473189" lastFinishedPulling="2025-11-11 14:09:48.604554405 +0000 UTC m=+2399.264844024" observedRunningTime="2025-11-11 14:09:49.260651121 +0000 UTC m=+2399.920940750" watchObservedRunningTime="2025-11-11 14:09:49.267773747 +0000 UTC m=+2399.928063366" Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.708807 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.813897 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-utilities\") pod \"8810a597-e909-4109-9679-d295c567593b\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.813996 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8zhc\" (UniqueName: \"kubernetes.io/projected/8810a597-e909-4109-9679-d295c567593b-kube-api-access-c8zhc\") pod \"8810a597-e909-4109-9679-d295c567593b\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.814054 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-catalog-content\") pod \"8810a597-e909-4109-9679-d295c567593b\" (UID: \"8810a597-e909-4109-9679-d295c567593b\") " Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.815117 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-utilities" (OuterVolumeSpecName: "utilities") pod "8810a597-e909-4109-9679-d295c567593b" (UID: "8810a597-e909-4109-9679-d295c567593b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.831081 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8810a597-e909-4109-9679-d295c567593b" (UID: "8810a597-e909-4109-9679-d295c567593b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.831279 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8810a597-e909-4109-9679-d295c567593b-kube-api-access-c8zhc" (OuterVolumeSpecName: "kube-api-access-c8zhc") pod "8810a597-e909-4109-9679-d295c567593b" (UID: "8810a597-e909-4109-9679-d295c567593b"). InnerVolumeSpecName "kube-api-access-c8zhc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.916192 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8zhc\" (UniqueName: \"kubernetes.io/projected/8810a597-e909-4109-9679-d295c567593b-kube-api-access-c8zhc\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.916224 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:49 crc kubenswrapper[4842]: I1111 14:09:49.916232 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8810a597-e909-4109-9679-d295c567593b-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.255550 4842 generic.go:334] "Generic (PLEG): container finished" podID="8810a597-e909-4109-9679-d295c567593b" containerID="96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0" exitCode=0 Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.255959 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cqdms" event={"ID":"8810a597-e909-4109-9679-d295c567593b","Type":"ContainerDied","Data":"96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0"} Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.256012 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cqdms" event={"ID":"8810a597-e909-4109-9679-d295c567593b","Type":"ContainerDied","Data":"7f4d2d466bfa5eab135d49880692e34f00adaa96196821e5efd59701019d9ae3"} Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.256031 4842 scope.go:117] "RemoveContainer" containerID="96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.257020 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cqdms" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.289673 4842 scope.go:117] "RemoveContainer" containerID="859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.305036 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cqdms"] Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.314203 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cqdms"] Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.331335 4842 scope.go:117] "RemoveContainer" containerID="c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.378375 4842 scope.go:117] "RemoveContainer" containerID="96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0" Nov 11 14:09:50 crc kubenswrapper[4842]: E1111 14:09:50.378843 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0\": container with ID starting with 96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0 not found: ID does not exist" containerID="96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.378875 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0"} err="failed to get container status \"96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0\": rpc error: code = NotFound desc = could not find container \"96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0\": container with ID starting with 96b4bf4bedc6761c6f41b215e10a4575d1007674f885063ca8e49967288358d0 not found: ID does not exist" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.378896 4842 scope.go:117] "RemoveContainer" containerID="859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61" Nov 11 14:09:50 crc kubenswrapper[4842]: E1111 14:09:50.379274 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61\": container with ID starting with 859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61 not found: ID does not exist" containerID="859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.379297 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61"} err="failed to get container status \"859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61\": rpc error: code = NotFound desc = could not find container \"859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61\": container with ID starting with 859bbb6c31276e3d504cf9aacc79d444dfd804b27f7e7b544eaded7b497adc61 not found: ID does not exist" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.379309 4842 scope.go:117] "RemoveContainer" containerID="c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72" Nov 11 14:09:50 crc kubenswrapper[4842]: E1111 14:09:50.379628 4842 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72\": container with ID starting with c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72 not found: ID does not exist" containerID="c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72" Nov 11 14:09:50 crc kubenswrapper[4842]: I1111 14:09:50.379652 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72"} err="failed to get container status \"c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72\": rpc error: code = NotFound desc = could not find container \"c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72\": container with ID starting with c59216bc4be9cf45bf09366c09eb346c2a6126d19bcc1540ae98ec1221b21b72 not found: ID does not exist" Nov 11 14:09:52 crc kubenswrapper[4842]: I1111 14:09:52.073162 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8810a597-e909-4109-9679-d295c567593b" path="/var/lib/kubelet/pods/8810a597-e909-4109-9679-d295c567593b/volumes" Nov 11 14:09:52 crc kubenswrapper[4842]: E1111 14:09:52.880031 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8810a597_e909_4109_9679_d295c567593b.slice\": RecentStats: unable to find data in memory cache]" Nov 11 14:09:52 crc kubenswrapper[4842]: I1111 14:09:52.892566 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:52 crc kubenswrapper[4842]: I1111 14:09:52.892597 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:52 crc kubenswrapper[4842]: I1111 14:09:52.933852 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:53 crc kubenswrapper[4842]: I1111 14:09:53.331280 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:53 crc kubenswrapper[4842]: I1111 14:09:53.823378 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xhnk5"] Nov 11 14:09:55 crc kubenswrapper[4842]: I1111 14:09:55.064011 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-tbtw2"] Nov 11 14:09:55 crc kubenswrapper[4842]: I1111 14:09:55.075724 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-tbtw2"] Nov 11 14:09:55 crc kubenswrapper[4842]: I1111 14:09:55.301746 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xhnk5" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="registry-server" containerID="cri-o://efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea" gracePeriod=2 Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.078646 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a471b182-7b05-4a81-93eb-257a2ce28a68" path="/var/lib/kubelet/pods/a471b182-7b05-4a81-93eb-257a2ce28a68/volumes" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.252548 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.311151 4842 generic.go:334] "Generic (PLEG): container finished" podID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerID="efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea" exitCode=0 Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.311194 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xhnk5" event={"ID":"452effce-f4ea-422c-aef4-f87b7ceb8ef5","Type":"ContainerDied","Data":"efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea"} Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.311221 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xhnk5" event={"ID":"452effce-f4ea-422c-aef4-f87b7ceb8ef5","Type":"ContainerDied","Data":"ed623108d8aa57595603fb8f0c1ea4f4d133b3d0e1725024b157f94e45a2b841"} Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.311238 4842 scope.go:117] "RemoveContainer" containerID="efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.311613 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xhnk5" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.333024 4842 scope.go:117] "RemoveContainer" containerID="ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.352825 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-catalog-content\") pod \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.352948 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-utilities\") pod \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.353014 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49bkg\" (UniqueName: \"kubernetes.io/projected/452effce-f4ea-422c-aef4-f87b7ceb8ef5-kube-api-access-49bkg\") pod \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\" (UID: \"452effce-f4ea-422c-aef4-f87b7ceb8ef5\") " Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.354363 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-utilities" (OuterVolumeSpecName: "utilities") pod "452effce-f4ea-422c-aef4-f87b7ceb8ef5" (UID: "452effce-f4ea-422c-aef4-f87b7ceb8ef5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.357063 4842 scope.go:117] "RemoveContainer" containerID="3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.359871 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/452effce-f4ea-422c-aef4-f87b7ceb8ef5-kube-api-access-49bkg" (OuterVolumeSpecName: "kube-api-access-49bkg") pod "452effce-f4ea-422c-aef4-f87b7ceb8ef5" (UID: "452effce-f4ea-422c-aef4-f87b7ceb8ef5"). InnerVolumeSpecName "kube-api-access-49bkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.399034 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "452effce-f4ea-422c-aef4-f87b7ceb8ef5" (UID: "452effce-f4ea-422c-aef4-f87b7ceb8ef5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.453403 4842 scope.go:117] "RemoveContainer" containerID="efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea" Nov 11 14:09:56 crc kubenswrapper[4842]: E1111 14:09:56.453883 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea\": container with ID starting with efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea not found: ID does not exist" containerID="efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.453933 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea"} err="failed to get container status \"efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea\": rpc error: code = NotFound desc = could not find container \"efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea\": container with ID starting with efcd12ae74ba3a4cfd8ff64950268a138adb1cede7ba7578556e21dc986e8aea not found: ID does not exist" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.453963 4842 scope.go:117] "RemoveContainer" containerID="ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64" Nov 11 14:09:56 crc kubenswrapper[4842]: E1111 14:09:56.454443 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64\": container with ID starting with ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64 not found: ID does not exist" containerID="ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.454475 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64"} err="failed to get container status \"ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64\": rpc error: code = NotFound desc = could not find container \"ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64\": container with ID starting with 
ce80398742af6f29dced449adf2ed75c3044d91c00112abca0a34a6f17c5fe64 not found: ID does not exist" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.454496 4842 scope.go:117] "RemoveContainer" containerID="3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1" Nov 11 14:09:56 crc kubenswrapper[4842]: E1111 14:09:56.454747 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1\": container with ID starting with 3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1 not found: ID does not exist" containerID="3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.454783 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1"} err="failed to get container status \"3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1\": rpc error: code = NotFound desc = could not find container \"3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1\": container with ID starting with 3c9793817cb721b5b9d2079ee6630a07d4b3d6bf774c7fdbfa7a3339fdc7faf1 not found: ID does not exist" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.454897 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.454928 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/452effce-f4ea-422c-aef4-f87b7ceb8ef5-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.454945 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49bkg\" (UniqueName: \"kubernetes.io/projected/452effce-f4ea-422c-aef4-f87b7ceb8ef5-kube-api-access-49bkg\") on node \"crc\" DevicePath \"\"" Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.640583 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xhnk5"] Nov 11 14:09:56 crc kubenswrapper[4842]: I1111 14:09:56.647243 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xhnk5"] Nov 11 14:09:58 crc kubenswrapper[4842]: I1111 14:09:58.028770 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-fxmwr"] Nov 11 14:09:58 crc kubenswrapper[4842]: I1111 14:09:58.039128 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-fxmwr"] Nov 11 14:09:58 crc kubenswrapper[4842]: I1111 14:09:58.070946 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" path="/var/lib/kubelet/pods/452effce-f4ea-422c-aef4-f87b7ceb8ef5/volumes" Nov 11 14:09:58 crc kubenswrapper[4842]: I1111 14:09:58.071652 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e53dd0e9-39e2-4158-97dc-6a28d3b14b5f" path="/var/lib/kubelet/pods/e53dd0e9-39e2-4158-97dc-6a28d3b14b5f/volumes" Nov 11 14:09:59 crc kubenswrapper[4842]: I1111 14:09:59.059078 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:09:59 crc kubenswrapper[4842]: E1111 
14:09:59.059508 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:10:00 crc kubenswrapper[4842]: I1111 14:10:00.330875 4842 scope.go:117] "RemoveContainer" containerID="1cdfbb355e8a2da5d86b3e9dfe3a1c82316efab30a662bd060764f099610aa02" Nov 11 14:10:00 crc kubenswrapper[4842]: I1111 14:10:00.372827 4842 scope.go:117] "RemoveContainer" containerID="ddad56ec3eb384de6a1cd53346451ed25181a486cac02184413c318b6d4b1f39" Nov 11 14:10:00 crc kubenswrapper[4842]: I1111 14:10:00.396739 4842 scope.go:117] "RemoveContainer" containerID="e00dda4eec59475414c3616691804a6d3344a64857746dbd3aebe4b9e86395c0" Nov 11 14:10:00 crc kubenswrapper[4842]: I1111 14:10:00.447635 4842 scope.go:117] "RemoveContainer" containerID="41ba1f5abbba2b1bf69614a768a73e9b4f9571536f9042c60b3bf1beb4f0a195" Nov 11 14:10:00 crc kubenswrapper[4842]: I1111 14:10:00.505827 4842 scope.go:117] "RemoveContainer" containerID="8f09f9d4d5d0a93c018bb5bf23ced83c76caf76d4f13d07a4c010f27c3b581ad" Nov 11 14:10:00 crc kubenswrapper[4842]: I1111 14:10:00.530982 4842 scope.go:117] "RemoveContainer" containerID="c38bc0cb8d91fe393ee940ecd1c837b65385ed477d6b18e59f0cc0229bcaf6cc" Nov 11 14:10:03 crc kubenswrapper[4842]: E1111 14:10:03.115219 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8810a597_e909_4109_9679_d295c567593b.slice\": RecentStats: unable to find data in memory cache]" Nov 11 14:10:11 crc kubenswrapper[4842]: I1111 14:10:11.059497 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:10:11 crc kubenswrapper[4842]: E1111 14:10:11.060445 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:10:13 crc kubenswrapper[4842]: E1111 14:10:13.381890 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8810a597_e909_4109_9679_d295c567593b.slice\": RecentStats: unable to find data in memory cache]" Nov 11 14:10:23 crc kubenswrapper[4842]: E1111 14:10:23.615864 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8810a597_e909_4109_9679_d295c567593b.slice\": RecentStats: unable to find data in memory cache]" Nov 11 14:10:25 crc kubenswrapper[4842]: I1111 14:10:25.058961 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:10:25 crc kubenswrapper[4842]: E1111 14:10:25.059602 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:10:33 crc kubenswrapper[4842]: E1111 14:10:33.868126 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8810a597_e909_4109_9679_d295c567593b.slice\": RecentStats: unable to find data in memory cache]" Nov 11 14:10:37 crc kubenswrapper[4842]: I1111 14:10:37.059448 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:10:37 crc kubenswrapper[4842]: E1111 14:10:37.060321 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:10:38 crc kubenswrapper[4842]: I1111 14:10:38.046594 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-fhtn5"] Nov 11 14:10:38 crc kubenswrapper[4842]: I1111 14:10:38.055806 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-fhtn5"] Nov 11 14:10:38 crc kubenswrapper[4842]: I1111 14:10:38.069398 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68060912-a0f0-46b2-8353-95d60b23450e" path="/var/lib/kubelet/pods/68060912-a0f0-46b2-8353-95d60b23450e/volumes" Nov 11 14:10:44 crc kubenswrapper[4842]: E1111 14:10:44.117349 4842 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8810a597_e909_4109_9679_d295c567593b.slice\": RecentStats: unable to find data in memory cache]" Nov 11 14:10:49 crc kubenswrapper[4842]: I1111 14:10:49.780121 4842 generic.go:334] "Generic (PLEG): container finished" podID="f94c8fa3-0b93-4cf3-9aae-9feb9bc79273" containerID="fdec77cb18889a965555bd20622d7e245ee3ef8763be5c9d2259f65f5a5eefbf" exitCode=0 Nov 11 14:10:49 crc kubenswrapper[4842]: I1111 14:10:49.780199 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" event={"ID":"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273","Type":"ContainerDied","Data":"fdec77cb18889a965555bd20622d7e245ee3ef8763be5c9d2259f65f5a5eefbf"} Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.059318 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.059750 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:10:51 crc 
kubenswrapper[4842]: I1111 14:10:51.194520 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.326430 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ws8m\" (UniqueName: \"kubernetes.io/projected/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-kube-api-access-9ws8m\") pod \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.326715 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-ssh-key\") pod \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.326737 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-inventory\") pod \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\" (UID: \"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273\") " Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.334464 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-kube-api-access-9ws8m" (OuterVolumeSpecName: "kube-api-access-9ws8m") pod "f94c8fa3-0b93-4cf3-9aae-9feb9bc79273" (UID: "f94c8fa3-0b93-4cf3-9aae-9feb9bc79273"). InnerVolumeSpecName "kube-api-access-9ws8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.354039 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f94c8fa3-0b93-4cf3-9aae-9feb9bc79273" (UID: "f94c8fa3-0b93-4cf3-9aae-9feb9bc79273"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.355471 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-inventory" (OuterVolumeSpecName: "inventory") pod "f94c8fa3-0b93-4cf3-9aae-9feb9bc79273" (UID: "f94c8fa3-0b93-4cf3-9aae-9feb9bc79273"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.428749 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.428791 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.428801 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ws8m\" (UniqueName: \"kubernetes.io/projected/f94c8fa3-0b93-4cf3-9aae-9feb9bc79273-kube-api-access-9ws8m\") on node \"crc\" DevicePath \"\"" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.800374 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" event={"ID":"f94c8fa3-0b93-4cf3-9aae-9feb9bc79273","Type":"ContainerDied","Data":"f8c7a0ff9f05be12c98fcf5568f086eb6bb14fd0080ce1774f27239f78ba796b"} Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.800417 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8c7a0ff9f05be12c98fcf5568f086eb6bb14fd0080ce1774f27239f78ba796b" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.800458 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-btvfg" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896041 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-tmvxh"] Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.896438 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="registry-server" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896460 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="registry-server" Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.896491 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f94c8fa3-0b93-4cf3-9aae-9feb9bc79273" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896503 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f94c8fa3-0b93-4cf3-9aae-9feb9bc79273" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.896519 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="registry-server" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896529 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="registry-server" Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.896554 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="extract-utilities" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896563 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="extract-utilities" Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.896577 4842 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="extract-content" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896584 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="extract-content" Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.896600 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="extract-content" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896608 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="extract-content" Nov 11 14:10:51 crc kubenswrapper[4842]: E1111 14:10:51.896618 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="extract-utilities" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896625 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="extract-utilities" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896847 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="f94c8fa3-0b93-4cf3-9aae-9feb9bc79273" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896870 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="8810a597-e909-4109-9679-d295c567593b" containerName="registry-server" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.896881 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="452effce-f4ea-422c-aef4-f87b7ceb8ef5" containerName="registry-server" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.897703 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.900342 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.900540 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.900567 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.900664 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:10:51 crc kubenswrapper[4842]: I1111 14:10:51.921035 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-tmvxh"] Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.041921 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6xdq\" (UniqueName: \"kubernetes.io/projected/448641d9-e39c-4fe4-bc02-cbf87ea74789-kube-api-access-t6xdq\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.041981 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.042857 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.145080 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6xdq\" (UniqueName: \"kubernetes.io/projected/448641d9-e39c-4fe4-bc02-cbf87ea74789-kube-api-access-t6xdq\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.145145 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.145200 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc 
kubenswrapper[4842]: I1111 14:10:52.149795 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.150355 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.161039 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6xdq\" (UniqueName: \"kubernetes.io/projected/448641d9-e39c-4fe4-bc02-cbf87ea74789-kube-api-access-t6xdq\") pod \"ssh-known-hosts-edpm-deployment-tmvxh\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.230625 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.734416 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-tmvxh"] Nov 11 14:10:52 crc kubenswrapper[4842]: I1111 14:10:52.811540 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" event={"ID":"448641d9-e39c-4fe4-bc02-cbf87ea74789","Type":"ContainerStarted","Data":"762f520f82759828075c2af8735f2c56507a10eddadabb37164c1aa3e2b27768"} Nov 11 14:10:53 crc kubenswrapper[4842]: I1111 14:10:53.820437 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" event={"ID":"448641d9-e39c-4fe4-bc02-cbf87ea74789","Type":"ContainerStarted","Data":"46ba78a709b3c31d8673434e4735e3c464baf15a42a4cf342a2bce9d76637b6f"} Nov 11 14:10:53 crc kubenswrapper[4842]: I1111 14:10:53.843653 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" podStartSLOduration=2.359126909 podStartE2EDuration="2.843632998s" podCreationTimestamp="2025-11-11 14:10:51 +0000 UTC" firstStartedPulling="2025-11-11 14:10:52.743210541 +0000 UTC m=+2463.403500150" lastFinishedPulling="2025-11-11 14:10:53.22771661 +0000 UTC m=+2463.888006239" observedRunningTime="2025-11-11 14:10:53.83923622 +0000 UTC m=+2464.499525849" watchObservedRunningTime="2025-11-11 14:10:53.843632998 +0000 UTC m=+2464.503922637" Nov 11 14:11:00 crc kubenswrapper[4842]: I1111 14:11:00.794324 4842 scope.go:117] "RemoveContainer" containerID="c471fe3400187582065aafaefdcc71a45198c8672f951425f26af8fe907d08dc" Nov 11 14:11:00 crc kubenswrapper[4842]: I1111 14:11:00.888696 4842 generic.go:334] "Generic (PLEG): container finished" podID="448641d9-e39c-4fe4-bc02-cbf87ea74789" containerID="46ba78a709b3c31d8673434e4735e3c464baf15a42a4cf342a2bce9d76637b6f" exitCode=0 Nov 11 14:11:00 crc kubenswrapper[4842]: I1111 14:11:00.888775 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" 
event={"ID":"448641d9-e39c-4fe4-bc02-cbf87ea74789","Type":"ContainerDied","Data":"46ba78a709b3c31d8673434e4735e3c464baf15a42a4cf342a2bce9d76637b6f"} Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.263270 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.450964 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6xdq\" (UniqueName: \"kubernetes.io/projected/448641d9-e39c-4fe4-bc02-cbf87ea74789-kube-api-access-t6xdq\") pod \"448641d9-e39c-4fe4-bc02-cbf87ea74789\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.451227 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-inventory-0\") pod \"448641d9-e39c-4fe4-bc02-cbf87ea74789\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.451303 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-ssh-key-openstack-edpm-ipam\") pod \"448641d9-e39c-4fe4-bc02-cbf87ea74789\" (UID: \"448641d9-e39c-4fe4-bc02-cbf87ea74789\") " Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.458620 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/448641d9-e39c-4fe4-bc02-cbf87ea74789-kube-api-access-t6xdq" (OuterVolumeSpecName: "kube-api-access-t6xdq") pod "448641d9-e39c-4fe4-bc02-cbf87ea74789" (UID: "448641d9-e39c-4fe4-bc02-cbf87ea74789"). InnerVolumeSpecName "kube-api-access-t6xdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.482840 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "448641d9-e39c-4fe4-bc02-cbf87ea74789" (UID: "448641d9-e39c-4fe4-bc02-cbf87ea74789"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.484739 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "448641d9-e39c-4fe4-bc02-cbf87ea74789" (UID: "448641d9-e39c-4fe4-bc02-cbf87ea74789"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.554527 4842 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.554567 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/448641d9-e39c-4fe4-bc02-cbf87ea74789-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.554581 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6xdq\" (UniqueName: \"kubernetes.io/projected/448641d9-e39c-4fe4-bc02-cbf87ea74789-kube-api-access-t6xdq\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.905625 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" event={"ID":"448641d9-e39c-4fe4-bc02-cbf87ea74789","Type":"ContainerDied","Data":"762f520f82759828075c2af8735f2c56507a10eddadabb37164c1aa3e2b27768"} Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.905950 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="762f520f82759828075c2af8735f2c56507a10eddadabb37164c1aa3e2b27768" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.905685 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-tmvxh" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.986486 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw"] Nov 11 14:11:02 crc kubenswrapper[4842]: E1111 14:11:02.986843 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="448641d9-e39c-4fe4-bc02-cbf87ea74789" containerName="ssh-known-hosts-edpm-deployment" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.986862 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="448641d9-e39c-4fe4-bc02-cbf87ea74789" containerName="ssh-known-hosts-edpm-deployment" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.987073 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="448641d9-e39c-4fe4-bc02-cbf87ea74789" containerName="ssh-known-hosts-edpm-deployment" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.987861 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.989718 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.989865 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.990461 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:11:02 crc kubenswrapper[4842]: I1111 14:11:02.993291 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.003772 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw"] Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.166980 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.167083 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.167131 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtfgk\" (UniqueName: \"kubernetes.io/projected/3250570c-99a1-4981-a05a-4ba474ed0ab2-kube-api-access-rtfgk\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.270006 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.270202 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtfgk\" (UniqueName: \"kubernetes.io/projected/3250570c-99a1-4981-a05a-4ba474ed0ab2-kube-api-access-rtfgk\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.270389 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.275149 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.275448 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.287930 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtfgk\" (UniqueName: \"kubernetes.io/projected/3250570c-99a1-4981-a05a-4ba474ed0ab2-kube-api-access-rtfgk\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-h2rqw\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.329375 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.842719 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw"] Nov 11 14:11:03 crc kubenswrapper[4842]: I1111 14:11:03.921408 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" event={"ID":"3250570c-99a1-4981-a05a-4ba474ed0ab2","Type":"ContainerStarted","Data":"b6d2ce936e7f59353e9d526bcfa8d3f7bacd4e33e923beaf1b43b3ff65613dee"} Nov 11 14:11:04 crc kubenswrapper[4842]: I1111 14:11:04.059836 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:11:04 crc kubenswrapper[4842]: E1111 14:11:04.060257 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:11:04 crc kubenswrapper[4842]: I1111 14:11:04.934247 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" event={"ID":"3250570c-99a1-4981-a05a-4ba474ed0ab2","Type":"ContainerStarted","Data":"c44064cf6149e9bf3b6b794720360c91967a8837ba6d96e8e865640cceb9c906"} Nov 11 14:11:04 crc kubenswrapper[4842]: I1111 14:11:04.970525 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" podStartSLOduration=2.432234431 podStartE2EDuration="2.970498107s" podCreationTimestamp="2025-11-11 14:11:02 +0000 UTC" firstStartedPulling="2025-11-11 14:11:03.8368507 +0000 UTC m=+2474.497140329" lastFinishedPulling="2025-11-11 14:11:04.375114376 +0000 UTC m=+2475.035404005" observedRunningTime="2025-11-11 
14:11:04.959061595 +0000 UTC m=+2475.619351214" watchObservedRunningTime="2025-11-11 14:11:04.970498107 +0000 UTC m=+2475.630787736" Nov 11 14:11:14 crc kubenswrapper[4842]: I1111 14:11:14.036773 4842 generic.go:334] "Generic (PLEG): container finished" podID="3250570c-99a1-4981-a05a-4ba474ed0ab2" containerID="c44064cf6149e9bf3b6b794720360c91967a8837ba6d96e8e865640cceb9c906" exitCode=0 Nov 11 14:11:14 crc kubenswrapper[4842]: I1111 14:11:14.036880 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" event={"ID":"3250570c-99a1-4981-a05a-4ba474ed0ab2","Type":"ContainerDied","Data":"c44064cf6149e9bf3b6b794720360c91967a8837ba6d96e8e865640cceb9c906"} Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.063057 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:11:15 crc kubenswrapper[4842]: E1111 14:11:15.063923 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.514041 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.613897 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-inventory\") pod \"3250570c-99a1-4981-a05a-4ba474ed0ab2\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.614147 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-ssh-key\") pod \"3250570c-99a1-4981-a05a-4ba474ed0ab2\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.614354 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtfgk\" (UniqueName: \"kubernetes.io/projected/3250570c-99a1-4981-a05a-4ba474ed0ab2-kube-api-access-rtfgk\") pod \"3250570c-99a1-4981-a05a-4ba474ed0ab2\" (UID: \"3250570c-99a1-4981-a05a-4ba474ed0ab2\") " Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.619969 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3250570c-99a1-4981-a05a-4ba474ed0ab2-kube-api-access-rtfgk" (OuterVolumeSpecName: "kube-api-access-rtfgk") pod "3250570c-99a1-4981-a05a-4ba474ed0ab2" (UID: "3250570c-99a1-4981-a05a-4ba474ed0ab2"). InnerVolumeSpecName "kube-api-access-rtfgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.642952 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-inventory" (OuterVolumeSpecName: "inventory") pod "3250570c-99a1-4981-a05a-4ba474ed0ab2" (UID: "3250570c-99a1-4981-a05a-4ba474ed0ab2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.643686 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3250570c-99a1-4981-a05a-4ba474ed0ab2" (UID: "3250570c-99a1-4981-a05a-4ba474ed0ab2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.717390 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.717423 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtfgk\" (UniqueName: \"kubernetes.io/projected/3250570c-99a1-4981-a05a-4ba474ed0ab2-kube-api-access-rtfgk\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:15 crc kubenswrapper[4842]: I1111 14:11:15.717437 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3250570c-99a1-4981-a05a-4ba474ed0ab2-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.053925 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" event={"ID":"3250570c-99a1-4981-a05a-4ba474ed0ab2","Type":"ContainerDied","Data":"b6d2ce936e7f59353e9d526bcfa8d3f7bacd4e33e923beaf1b43b3ff65613dee"} Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.053971 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6d2ce936e7f59353e9d526bcfa8d3f7bacd4e33e923beaf1b43b3ff65613dee" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.054020 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-h2rqw" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.142338 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh"] Nov 11 14:11:16 crc kubenswrapper[4842]: E1111 14:11:16.142764 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3250570c-99a1-4981-a05a-4ba474ed0ab2" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.142777 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3250570c-99a1-4981-a05a-4ba474ed0ab2" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.142992 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="3250570c-99a1-4981-a05a-4ba474ed0ab2" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.143747 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.146731 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.148473 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.148746 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.149359 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.158368 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh"] Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.330865 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwxbl\" (UniqueName: \"kubernetes.io/projected/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-kube-api-access-gwxbl\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.331322 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.331528 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.433260 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwxbl\" (UniqueName: \"kubernetes.io/projected/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-kube-api-access-gwxbl\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.433409 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.433555 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: 
\"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.437423 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.446590 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.449781 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwxbl\" (UniqueName: \"kubernetes.io/projected/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-kube-api-access-gwxbl\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.470812 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:16 crc kubenswrapper[4842]: I1111 14:11:16.997876 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh"] Nov 11 14:11:17 crc kubenswrapper[4842]: I1111 14:11:17.064960 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" event={"ID":"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac","Type":"ContainerStarted","Data":"fc9e98c81ccaa44c9404cec0e6960eb637f0a9d2d7883da05ac59c442b05ad5f"} Nov 11 14:11:18 crc kubenswrapper[4842]: I1111 14:11:18.075619 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" event={"ID":"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac","Type":"ContainerStarted","Data":"37c47bd36afbf0dba39dda7faa31345c42bca25b4fb1c9378e8947c2eabcf628"} Nov 11 14:11:18 crc kubenswrapper[4842]: I1111 14:11:18.104018 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" podStartSLOduration=1.401281588 podStartE2EDuration="2.103990787s" podCreationTimestamp="2025-11-11 14:11:16 +0000 UTC" firstStartedPulling="2025-11-11 14:11:17.006832222 +0000 UTC m=+2487.667121841" lastFinishedPulling="2025-11-11 14:11:17.709541421 +0000 UTC m=+2488.369831040" observedRunningTime="2025-11-11 14:11:18.095889891 +0000 UTC m=+2488.756179510" watchObservedRunningTime="2025-11-11 14:11:18.103990787 +0000 UTC m=+2488.764280416" Nov 11 14:11:28 crc kubenswrapper[4842]: I1111 14:11:28.059111 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:11:28 crc kubenswrapper[4842]: E1111 14:11:28.059898 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:11:28 crc kubenswrapper[4842]: I1111 14:11:28.185740 4842 generic.go:334] "Generic (PLEG): container finished" podID="c87f657c-bfbc-4d66-9a66-f751fa8ac3ac" containerID="37c47bd36afbf0dba39dda7faa31345c42bca25b4fb1c9378e8947c2eabcf628" exitCode=0 Nov 11 14:11:28 crc kubenswrapper[4842]: I1111 14:11:28.185786 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" event={"ID":"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac","Type":"ContainerDied","Data":"37c47bd36afbf0dba39dda7faa31345c42bca25b4fb1c9378e8947c2eabcf628"} Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.669981 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.733312 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-ssh-key\") pod \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.733571 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-inventory\") pod \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.733617 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwxbl\" (UniqueName: \"kubernetes.io/projected/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-kube-api-access-gwxbl\") pod \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\" (UID: \"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac\") " Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.740391 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-kube-api-access-gwxbl" (OuterVolumeSpecName: "kube-api-access-gwxbl") pod "c87f657c-bfbc-4d66-9a66-f751fa8ac3ac" (UID: "c87f657c-bfbc-4d66-9a66-f751fa8ac3ac"). InnerVolumeSpecName "kube-api-access-gwxbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.762734 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c87f657c-bfbc-4d66-9a66-f751fa8ac3ac" (UID: "c87f657c-bfbc-4d66-9a66-f751fa8ac3ac"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.765201 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-inventory" (OuterVolumeSpecName: "inventory") pod "c87f657c-bfbc-4d66-9a66-f751fa8ac3ac" (UID: "c87f657c-bfbc-4d66-9a66-f751fa8ac3ac"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.836741 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.836814 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:29 crc kubenswrapper[4842]: I1111 14:11:29.836825 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwxbl\" (UniqueName: \"kubernetes.io/projected/c87f657c-bfbc-4d66-9a66-f751fa8ac3ac-kube-api-access-gwxbl\") on node \"crc\" DevicePath \"\"" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.207646 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" event={"ID":"c87f657c-bfbc-4d66-9a66-f751fa8ac3ac","Type":"ContainerDied","Data":"fc9e98c81ccaa44c9404cec0e6960eb637f0a9d2d7883da05ac59c442b05ad5f"} Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.207686 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc9e98c81ccaa44c9404cec0e6960eb637f0a9d2d7883da05ac59c442b05ad5f" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.207753 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.297008 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs"] Nov 11 14:11:30 crc kubenswrapper[4842]: E1111 14:11:30.297381 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c87f657c-bfbc-4d66-9a66-f751fa8ac3ac" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.297399 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c87f657c-bfbc-4d66-9a66-f751fa8ac3ac" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.297600 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c87f657c-bfbc-4d66-9a66-f751fa8ac3ac" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.298229 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.301015 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.301138 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.301254 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.303075 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.303424 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.303746 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.303998 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.304059 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.320191 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs"] Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.344938 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l8xz\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-kube-api-access-9l8xz\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.344984 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345063 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345309 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345380 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345406 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345569 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345614 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345634 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345655 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345707 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 
14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345732 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345784 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.345806 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447074 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447176 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447214 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447240 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447311 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447343 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447363 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447381 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447402 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447425 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447452 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447471 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: 
\"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447498 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l8xz\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-kube-api-access-9l8xz\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.447542 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.452170 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.452229 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.452415 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.452765 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.452768 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.453638 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-inventory\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.455008 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.455554 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.455895 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.457521 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.457586 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.457779 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.458982 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 
14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.469082 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l8xz\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-kube-api-access-9l8xz\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:30 crc kubenswrapper[4842]: I1111 14:11:30.625465 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:11:31 crc kubenswrapper[4842]: I1111 14:11:31.182578 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs"] Nov 11 14:11:31 crc kubenswrapper[4842]: I1111 14:11:31.216480 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" event={"ID":"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e","Type":"ContainerStarted","Data":"40609de391ff9ae6f4ca947ded4301fae44819658b3492638c05d1c747a83db8"} Nov 11 14:11:32 crc kubenswrapper[4842]: I1111 14:11:32.231000 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" event={"ID":"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e","Type":"ContainerStarted","Data":"2e4b4fb017ef4bb3dfe84aeb635381f23cf5786bdef5374f6fd4c510a297ea88"} Nov 11 14:11:32 crc kubenswrapper[4842]: I1111 14:11:32.272789 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" podStartSLOduration=1.8088249589999998 podStartE2EDuration="2.272769748s" podCreationTimestamp="2025-11-11 14:11:30 +0000 UTC" firstStartedPulling="2025-11-11 14:11:31.191166225 +0000 UTC m=+2501.851455844" lastFinishedPulling="2025-11-11 14:11:31.655111014 +0000 UTC m=+2502.315400633" observedRunningTime="2025-11-11 14:11:32.250211986 +0000 UTC m=+2502.910501645" watchObservedRunningTime="2025-11-11 14:11:32.272769748 +0000 UTC m=+2502.933059377" Nov 11 14:11:42 crc kubenswrapper[4842]: I1111 14:11:42.059752 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:11:42 crc kubenswrapper[4842]: E1111 14:11:42.060501 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:11:56 crc kubenswrapper[4842]: I1111 14:11:56.059760 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:11:56 crc kubenswrapper[4842]: E1111 14:11:56.063166 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:12:11 crc 
kubenswrapper[4842]: I1111 14:12:11.059328 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:12:11 crc kubenswrapper[4842]: E1111 14:12:11.061218 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:12:13 crc kubenswrapper[4842]: I1111 14:12:13.571229 4842 generic.go:334] "Generic (PLEG): container finished" podID="7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" containerID="2e4b4fb017ef4bb3dfe84aeb635381f23cf5786bdef5374f6fd4c510a297ea88" exitCode=0 Nov 11 14:12:13 crc kubenswrapper[4842]: I1111 14:12:13.571357 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" event={"ID":"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e","Type":"ContainerDied","Data":"2e4b4fb017ef4bb3dfe84aeb635381f23cf5786bdef5374f6fd4c510a297ea88"} Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.018725 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192262 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-neutron-metadata-combined-ca-bundle\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192315 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-libvirt-combined-ca-bundle\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192370 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-nova-combined-ca-bundle\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192406 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-repo-setup-combined-ca-bundle\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192428 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-bootstrap-combined-ca-bundle\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192457 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-inventory\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192490 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192522 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192565 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-telemetry-combined-ca-bundle\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192643 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9l8xz\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-kube-api-access-9l8xz\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192727 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192758 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ssh-key\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192822 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.192847 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ovn-combined-ca-bundle\") pod \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\" (UID: \"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e\") " Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.199968 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-kube-api-access-9l8xz" (OuterVolumeSpecName: "kube-api-access-9l8xz") 
pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "kube-api-access-9l8xz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.200638 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.200895 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.201057 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.201863 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.203307 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.203403 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.203757 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). 
InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.204176 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.204544 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.205535 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.206050 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.244488 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-inventory" (OuterVolumeSpecName: "inventory") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.247036 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" (UID: "7252c6dc-fc9e-44ee-bea7-1b61760f4f8e"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.295882 4842 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.295927 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.295940 4842 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.295954 4842 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.295968 4842 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.295980 4842 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.295992 4842 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.296004 4842 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.296017 4842 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.296029 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.296041 4842 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.296055 4842 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.296067 4842 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.296078 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9l8xz\" (UniqueName: \"kubernetes.io/projected/7252c6dc-fc9e-44ee-bea7-1b61760f4f8e-kube-api-access-9l8xz\") on node \"crc\" DevicePath \"\"" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.592029 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" event={"ID":"7252c6dc-fc9e-44ee-bea7-1b61760f4f8e","Type":"ContainerDied","Data":"40609de391ff9ae6f4ca947ded4301fae44819658b3492638c05d1c747a83db8"} Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.592073 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40609de391ff9ae6f4ca947ded4301fae44819658b3492638c05d1c747a83db8" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.592119 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.696386 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56"] Nov 11 14:12:15 crc kubenswrapper[4842]: E1111 14:12:15.696789 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.696807 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.697032 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="7252c6dc-fc9e-44ee-bea7-1b61760f4f8e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.697830 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.703705 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.703799 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.703879 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92fb25d5-d93a-4932-8d37-94ca7302c774-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.703911 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.703941 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wksbk\" (UniqueName: \"kubernetes.io/projected/92fb25d5-d93a-4932-8d37-94ca7302c774-kube-api-access-wksbk\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.707138 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.707333 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.707725 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.707873 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.708225 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.711198 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56"] Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.806667 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.806821 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.806946 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92fb25d5-d93a-4932-8d37-94ca7302c774-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.806972 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.806998 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wksbk\" (UniqueName: \"kubernetes.io/projected/92fb25d5-d93a-4932-8d37-94ca7302c774-kube-api-access-wksbk\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.808525 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92fb25d5-d93a-4932-8d37-94ca7302c774-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.811296 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.811859 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.818988 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") 
" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:15 crc kubenswrapper[4842]: I1111 14:12:15.832705 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wksbk\" (UniqueName: \"kubernetes.io/projected/92fb25d5-d93a-4932-8d37-94ca7302c774-kube-api-access-wksbk\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-sqg56\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:16 crc kubenswrapper[4842]: I1111 14:12:16.013354 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:12:16 crc kubenswrapper[4842]: I1111 14:12:16.638761 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56"] Nov 11 14:12:17 crc kubenswrapper[4842]: I1111 14:12:17.612495 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" event={"ID":"92fb25d5-d93a-4932-8d37-94ca7302c774","Type":"ContainerStarted","Data":"150efa45d51e7d56cc5ce755c696898b71d1a2926fbcee57b8dfbb150e73dd8a"} Nov 11 14:12:17 crc kubenswrapper[4842]: I1111 14:12:17.613060 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" event={"ID":"92fb25d5-d93a-4932-8d37-94ca7302c774","Type":"ContainerStarted","Data":"9d3e12c01d47d293dc3d5c77315491fdb0a5e55393e0dfe4affcd683dd22c39b"} Nov 11 14:12:17 crc kubenswrapper[4842]: I1111 14:12:17.640976 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" podStartSLOduration=2.089581918 podStartE2EDuration="2.640953328s" podCreationTimestamp="2025-11-11 14:12:15 +0000 UTC" firstStartedPulling="2025-11-11 14:12:16.645817996 +0000 UTC m=+2547.306107615" lastFinishedPulling="2025-11-11 14:12:17.197189406 +0000 UTC m=+2547.857479025" observedRunningTime="2025-11-11 14:12:17.636715185 +0000 UTC m=+2548.297004804" watchObservedRunningTime="2025-11-11 14:12:17.640953328 +0000 UTC m=+2548.301242947" Nov 11 14:12:23 crc kubenswrapper[4842]: I1111 14:12:23.058847 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:12:23 crc kubenswrapper[4842]: E1111 14:12:23.059648 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:12:38 crc kubenswrapper[4842]: I1111 14:12:38.059668 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:12:38 crc kubenswrapper[4842]: E1111 14:12:38.060522 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:12:49 crc 
kubenswrapper[4842]: I1111 14:12:49.059156 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:12:49 crc kubenswrapper[4842]: E1111 14:12:49.059983 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:13:01 crc kubenswrapper[4842]: I1111 14:13:01.059287 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:13:01 crc kubenswrapper[4842]: E1111 14:13:01.060259 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:13:12 crc kubenswrapper[4842]: I1111 14:13:12.059074 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:13:12 crc kubenswrapper[4842]: E1111 14:13:12.059841 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:13:24 crc kubenswrapper[4842]: I1111 14:13:24.342073 4842 generic.go:334] "Generic (PLEG): container finished" podID="92fb25d5-d93a-4932-8d37-94ca7302c774" containerID="150efa45d51e7d56cc5ce755c696898b71d1a2926fbcee57b8dfbb150e73dd8a" exitCode=0 Nov 11 14:13:24 crc kubenswrapper[4842]: I1111 14:13:24.342174 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" event={"ID":"92fb25d5-d93a-4932-8d37-94ca7302c774","Type":"ContainerDied","Data":"150efa45d51e7d56cc5ce755c696898b71d1a2926fbcee57b8dfbb150e73dd8a"} Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.748671 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.887072 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wksbk\" (UniqueName: \"kubernetes.io/projected/92fb25d5-d93a-4932-8d37-94ca7302c774-kube-api-access-wksbk\") pod \"92fb25d5-d93a-4932-8d37-94ca7302c774\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.887172 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-inventory\") pod \"92fb25d5-d93a-4932-8d37-94ca7302c774\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.887253 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ssh-key\") pod \"92fb25d5-d93a-4932-8d37-94ca7302c774\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.887335 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ovn-combined-ca-bundle\") pod \"92fb25d5-d93a-4932-8d37-94ca7302c774\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.887380 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92fb25d5-d93a-4932-8d37-94ca7302c774-ovncontroller-config-0\") pod \"92fb25d5-d93a-4932-8d37-94ca7302c774\" (UID: \"92fb25d5-d93a-4932-8d37-94ca7302c774\") " Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.893159 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92fb25d5-d93a-4932-8d37-94ca7302c774-kube-api-access-wksbk" (OuterVolumeSpecName: "kube-api-access-wksbk") pod "92fb25d5-d93a-4932-8d37-94ca7302c774" (UID: "92fb25d5-d93a-4932-8d37-94ca7302c774"). InnerVolumeSpecName "kube-api-access-wksbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.894038 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "92fb25d5-d93a-4932-8d37-94ca7302c774" (UID: "92fb25d5-d93a-4932-8d37-94ca7302c774"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.915509 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92fb25d5-d93a-4932-8d37-94ca7302c774-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "92fb25d5-d93a-4932-8d37-94ca7302c774" (UID: "92fb25d5-d93a-4932-8d37-94ca7302c774"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.920269 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-inventory" (OuterVolumeSpecName: "inventory") pod "92fb25d5-d93a-4932-8d37-94ca7302c774" (UID: "92fb25d5-d93a-4932-8d37-94ca7302c774"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.924932 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "92fb25d5-d93a-4932-8d37-94ca7302c774" (UID: "92fb25d5-d93a-4932-8d37-94ca7302c774"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.990264 4842 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.990303 4842 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92fb25d5-d93a-4932-8d37-94ca7302c774-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.990315 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wksbk\" (UniqueName: \"kubernetes.io/projected/92fb25d5-d93a-4932-8d37-94ca7302c774-kube-api-access-wksbk\") on node \"crc\" DevicePath \"\"" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.990327 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:13:25 crc kubenswrapper[4842]: I1111 14:13:25.990338 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92fb25d5-d93a-4932-8d37-94ca7302c774-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.058817 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:13:26 crc kubenswrapper[4842]: E1111 14:13:26.059274 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.361306 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" event={"ID":"92fb25d5-d93a-4932-8d37-94ca7302c774","Type":"ContainerDied","Data":"9d3e12c01d47d293dc3d5c77315491fdb0a5e55393e0dfe4affcd683dd22c39b"} Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.361623 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d3e12c01d47d293dc3d5c77315491fdb0a5e55393e0dfe4affcd683dd22c39b" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.361397 4842 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-sqg56" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.458954 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp"] Nov 11 14:13:26 crc kubenswrapper[4842]: E1111 14:13:26.459408 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92fb25d5-d93a-4932-8d37-94ca7302c774" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.459428 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="92fb25d5-d93a-4932-8d37-94ca7302c774" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.459629 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="92fb25d5-d93a-4932-8d37-94ca7302c774" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.460306 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.462903 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.462925 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.463598 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.463748 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.464388 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.465729 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.476253 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp"] Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.500751 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt27h\" (UniqueName: \"kubernetes.io/projected/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-kube-api-access-wt27h\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.500827 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.500867 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.500894 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.500951 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.501016 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.602422 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt27h\" (UniqueName: \"kubernetes.io/projected/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-kube-api-access-wt27h\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.602486 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.602747 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.602785 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.602853 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.602922 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.607630 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.609287 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.610958 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.612201 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.612590 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.622505 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wt27h\" (UniqueName: \"kubernetes.io/projected/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-kube-api-access-wt27h\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:26 crc kubenswrapper[4842]: I1111 14:13:26.778734 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:13:27 crc kubenswrapper[4842]: I1111 14:13:27.322531 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp"] Nov 11 14:13:27 crc kubenswrapper[4842]: I1111 14:13:27.330599 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:13:27 crc kubenswrapper[4842]: I1111 14:13:27.373353 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" event={"ID":"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524","Type":"ContainerStarted","Data":"2d13ece3543cdb33a75ea353d8b1a8b10de08f75edf29b945305f454ee9c88fc"} Nov 11 14:13:28 crc kubenswrapper[4842]: I1111 14:13:28.383014 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" event={"ID":"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524","Type":"ContainerStarted","Data":"475be7186a28c67f4d9ca4a7deff2e152e0b0ed57f1f2e22364b27c803a224ce"} Nov 11 14:13:28 crc kubenswrapper[4842]: I1111 14:13:28.405613 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" podStartSLOduration=1.923838465 podStartE2EDuration="2.405587146s" podCreationTimestamp="2025-11-11 14:13:26 +0000 UTC" firstStartedPulling="2025-11-11 14:13:27.330347007 +0000 UTC m=+2617.990636626" lastFinishedPulling="2025-11-11 14:13:27.812095688 +0000 UTC m=+2618.472385307" observedRunningTime="2025-11-11 14:13:28.399513744 +0000 UTC m=+2619.059803373" watchObservedRunningTime="2025-11-11 14:13:28.405587146 +0000 UTC m=+2619.065876775" Nov 11 14:13:41 crc kubenswrapper[4842]: I1111 14:13:41.059619 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:13:41 crc kubenswrapper[4842]: E1111 14:13:41.060422 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:13:56 crc kubenswrapper[4842]: I1111 14:13:56.059322 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:13:56 crc kubenswrapper[4842]: E1111 14:13:56.060320 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:14:08 crc kubenswrapper[4842]: I1111 14:14:08.059049 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:14:08 crc kubenswrapper[4842]: E1111 14:14:08.059755 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:14:19 crc kubenswrapper[4842]: I1111 14:14:19.060168 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:14:19 crc kubenswrapper[4842]: E1111 14:14:19.060986 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:14:19 crc kubenswrapper[4842]: I1111 14:14:19.837544 4842 generic.go:334] "Generic (PLEG): container finished" podID="576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" containerID="475be7186a28c67f4d9ca4a7deff2e152e0b0ed57f1f2e22364b27c803a224ce" exitCode=0 Nov 11 14:14:19 crc kubenswrapper[4842]: I1111 14:14:19.837626 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" event={"ID":"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524","Type":"ContainerDied","Data":"475be7186a28c67f4d9ca4a7deff2e152e0b0ed57f1f2e22364b27c803a224ce"} Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.308309 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.401451 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-metadata-combined-ca-bundle\") pod \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.401499 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-inventory\") pod \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.401521 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-ovn-metadata-agent-neutron-config-0\") pod \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.401565 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-ssh-key\") pod \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.401647 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wt27h\" (UniqueName: \"kubernetes.io/projected/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-kube-api-access-wt27h\") pod \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.401721 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-nova-metadata-neutron-config-0\") pod \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\" (UID: \"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524\") " Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.407986 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" (UID: "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.408387 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-kube-api-access-wt27h" (OuterVolumeSpecName: "kube-api-access-wt27h") pod "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" (UID: "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524"). InnerVolumeSpecName "kube-api-access-wt27h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.431848 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" (UID: "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.436819 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" (UID: "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.438520 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" (UID: "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.439286 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-inventory" (OuterVolumeSpecName: "inventory") pod "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" (UID: "576c96ec-4ad4-4eee-ae3b-10b4b4aa5524"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.504001 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.504038 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wt27h\" (UniqueName: \"kubernetes.io/projected/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-kube-api-access-wt27h\") on node \"crc\" DevicePath \"\"" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.504052 4842 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.504061 4842 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.504075 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.504083 4842 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/576c96ec-4ad4-4eee-ae3b-10b4b4aa5524-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.859074 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" event={"ID":"576c96ec-4ad4-4eee-ae3b-10b4b4aa5524","Type":"ContainerDied","Data":"2d13ece3543cdb33a75ea353d8b1a8b10de08f75edf29b945305f454ee9c88fc"} Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.859135 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d13ece3543cdb33a75ea353d8b1a8b10de08f75edf29b945305f454ee9c88fc" Nov 11 14:14:21 crc kubenswrapper[4842]: I1111 14:14:21.859208 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.002038 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx"] Nov 11 14:14:22 crc kubenswrapper[4842]: E1111 14:14:22.002554 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.002580 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.002854 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="576c96ec-4ad4-4eee-ae3b-10b4b4aa5524" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.003720 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.007657 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.008330 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.008588 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.009740 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.016491 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.017486 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx"] Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.116456 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.116529 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.116608 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.116830 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6g6h\" (UniqueName: \"kubernetes.io/projected/28de897b-72a6-4d7b-b7e7-e205a32fe32d-kube-api-access-b6g6h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.116894 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.218299 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.218368 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.218457 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.218547 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6g6h\" (UniqueName: \"kubernetes.io/projected/28de897b-72a6-4d7b-b7e7-e205a32fe32d-kube-api-access-b6g6h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.218583 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.223535 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.223846 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.228221 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.229557 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-combined-ca-bundle\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.246065 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6g6h\" (UniqueName: \"kubernetes.io/projected/28de897b-72a6-4d7b-b7e7-e205a32fe32d-kube-api-access-b6g6h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.327038 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.851290 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx"] Nov 11 14:14:22 crc kubenswrapper[4842]: I1111 14:14:22.874908 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" event={"ID":"28de897b-72a6-4d7b-b7e7-e205a32fe32d","Type":"ContainerStarted","Data":"2ac5f28ae6252f8b06a64c0a25b848cf78c467f95175dc071fa0f6a524ce6d69"} Nov 11 14:14:24 crc kubenswrapper[4842]: I1111 14:14:24.894757 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" event={"ID":"28de897b-72a6-4d7b-b7e7-e205a32fe32d","Type":"ContainerStarted","Data":"6009204f2e0da586a3e05db24c985177c79a244b0ace56ecf0d91357c0e36dd4"} Nov 11 14:14:32 crc kubenswrapper[4842]: I1111 14:14:32.059090 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:14:32 crc kubenswrapper[4842]: E1111 14:14:32.059891 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:14:44 crc kubenswrapper[4842]: I1111 14:14:44.059914 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:14:44 crc kubenswrapper[4842]: E1111 14:14:44.060773 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:14:57 crc kubenswrapper[4842]: I1111 14:14:57.059314 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:14:58 crc kubenswrapper[4842]: I1111 14:14:58.248713 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"93843482ea3a11ffe477de0c0d8491b771069af19c3ea831c888316570f99522"} Nov 11 14:14:58 crc 
kubenswrapper[4842]: I1111 14:14:58.267951 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" podStartSLOduration=36.389778182 podStartE2EDuration="37.267935528s" podCreationTimestamp="2025-11-11 14:14:21 +0000 UTC" firstStartedPulling="2025-11-11 14:14:22.857690901 +0000 UTC m=+2673.517980520" lastFinishedPulling="2025-11-11 14:14:23.735848217 +0000 UTC m=+2674.396137866" observedRunningTime="2025-11-11 14:14:24.917524047 +0000 UTC m=+2675.577813676" watchObservedRunningTime="2025-11-11 14:14:58.267935528 +0000 UTC m=+2708.928225147" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.154506 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn"] Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.158422 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.163769 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.163787 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.171286 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn"] Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.302700 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f392a4d4-8117-4b08-8e41-4d92d5d10a27-config-volume\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.302788 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f392a4d4-8117-4b08-8e41-4d92d5d10a27-secret-volume\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.302907 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m49m9\" (UniqueName: \"kubernetes.io/projected/f392a4d4-8117-4b08-8e41-4d92d5d10a27-kube-api-access-m49m9\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.404533 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f392a4d4-8117-4b08-8e41-4d92d5d10a27-config-volume\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.404652 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/f392a4d4-8117-4b08-8e41-4d92d5d10a27-secret-volume\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.404718 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m49m9\" (UniqueName: \"kubernetes.io/projected/f392a4d4-8117-4b08-8e41-4d92d5d10a27-kube-api-access-m49m9\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.407748 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f392a4d4-8117-4b08-8e41-4d92d5d10a27-config-volume\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.416856 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f392a4d4-8117-4b08-8e41-4d92d5d10a27-secret-volume\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.423111 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m49m9\" (UniqueName: \"kubernetes.io/projected/f392a4d4-8117-4b08-8e41-4d92d5d10a27-kube-api-access-m49m9\") pod \"collect-profiles-29381175-krcnn\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.479624 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:00 crc kubenswrapper[4842]: I1111 14:15:00.919844 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn"] Nov 11 14:15:00 crc kubenswrapper[4842]: W1111 14:15:00.932454 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf392a4d4_8117_4b08_8e41_4d92d5d10a27.slice/crio-a3373733da356a3c1a4f613abb0fac6815ab164e719e01bd2341e11729ff931c WatchSource:0}: Error finding container a3373733da356a3c1a4f613abb0fac6815ab164e719e01bd2341e11729ff931c: Status 404 returned error can't find the container with id a3373733da356a3c1a4f613abb0fac6815ab164e719e01bd2341e11729ff931c Nov 11 14:15:01 crc kubenswrapper[4842]: I1111 14:15:01.276796 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" event={"ID":"f392a4d4-8117-4b08-8e41-4d92d5d10a27","Type":"ContainerStarted","Data":"a616b45a044341f2dd5a85cc93c5a6587b6428a6b295b8d264680ba37a4605b7"} Nov 11 14:15:01 crc kubenswrapper[4842]: I1111 14:15:01.276885 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" event={"ID":"f392a4d4-8117-4b08-8e41-4d92d5d10a27","Type":"ContainerStarted","Data":"a3373733da356a3c1a4f613abb0fac6815ab164e719e01bd2341e11729ff931c"} Nov 11 14:15:01 crc kubenswrapper[4842]: I1111 14:15:01.316820 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" podStartSLOduration=1.316800451 podStartE2EDuration="1.316800451s" podCreationTimestamp="2025-11-11 14:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:15:01.307955352 +0000 UTC m=+2711.968244971" watchObservedRunningTime="2025-11-11 14:15:01.316800451 +0000 UTC m=+2711.977090070" Nov 11 14:15:02 crc kubenswrapper[4842]: I1111 14:15:02.288248 4842 generic.go:334] "Generic (PLEG): container finished" podID="f392a4d4-8117-4b08-8e41-4d92d5d10a27" containerID="a616b45a044341f2dd5a85cc93c5a6587b6428a6b295b8d264680ba37a4605b7" exitCode=0 Nov 11 14:15:02 crc kubenswrapper[4842]: I1111 14:15:02.288348 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" event={"ID":"f392a4d4-8117-4b08-8e41-4d92d5d10a27","Type":"ContainerDied","Data":"a616b45a044341f2dd5a85cc93c5a6587b6428a6b295b8d264680ba37a4605b7"} Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.604711 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.734102 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jlw69"] Nov 11 14:15:03 crc kubenswrapper[4842]: E1111 14:15:03.734561 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f392a4d4-8117-4b08-8e41-4d92d5d10a27" containerName="collect-profiles" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.734578 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f392a4d4-8117-4b08-8e41-4d92d5d10a27" containerName="collect-profiles" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.734778 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="f392a4d4-8117-4b08-8e41-4d92d5d10a27" containerName="collect-profiles" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.736621 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.756099 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jlw69"] Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.785018 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m49m9\" (UniqueName: \"kubernetes.io/projected/f392a4d4-8117-4b08-8e41-4d92d5d10a27-kube-api-access-m49m9\") pod \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.785173 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f392a4d4-8117-4b08-8e41-4d92d5d10a27-secret-volume\") pod \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.785537 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f392a4d4-8117-4b08-8e41-4d92d5d10a27-config-volume\") pod \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\" (UID: \"f392a4d4-8117-4b08-8e41-4d92d5d10a27\") " Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.786354 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f392a4d4-8117-4b08-8e41-4d92d5d10a27-config-volume" (OuterVolumeSpecName: "config-volume") pod "f392a4d4-8117-4b08-8e41-4d92d5d10a27" (UID: "f392a4d4-8117-4b08-8e41-4d92d5d10a27"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.787091 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f392a4d4-8117-4b08-8e41-4d92d5d10a27-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.793231 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f392a4d4-8117-4b08-8e41-4d92d5d10a27-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f392a4d4-8117-4b08-8e41-4d92d5d10a27" (UID: "f392a4d4-8117-4b08-8e41-4d92d5d10a27"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.795220 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f392a4d4-8117-4b08-8e41-4d92d5d10a27-kube-api-access-m49m9" (OuterVolumeSpecName: "kube-api-access-m49m9") pod "f392a4d4-8117-4b08-8e41-4d92d5d10a27" (UID: "f392a4d4-8117-4b08-8e41-4d92d5d10a27"). InnerVolumeSpecName "kube-api-access-m49m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.888839 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-catalog-content\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.889228 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdlfs\" (UniqueName: \"kubernetes.io/projected/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-kube-api-access-pdlfs\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.889302 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-utilities\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.889601 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m49m9\" (UniqueName: \"kubernetes.io/projected/f392a4d4-8117-4b08-8e41-4d92d5d10a27-kube-api-access-m49m9\") on node \"crc\" DevicePath \"\"" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.889623 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f392a4d4-8117-4b08-8e41-4d92d5d10a27-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.991110 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-catalog-content\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.991458 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdlfs\" (UniqueName: \"kubernetes.io/projected/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-kube-api-access-pdlfs\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.991480 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-utilities\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 
14:15:03.991894 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-catalog-content\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:03 crc kubenswrapper[4842]: I1111 14:15:03.991918 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-utilities\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.010586 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdlfs\" (UniqueName: \"kubernetes.io/projected/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-kube-api-access-pdlfs\") pod \"community-operators-jlw69\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.064299 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.312712 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" event={"ID":"f392a4d4-8117-4b08-8e41-4d92d5d10a27","Type":"ContainerDied","Data":"a3373733da356a3c1a4f613abb0fac6815ab164e719e01bd2341e11729ff931c"} Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.312754 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3373733da356a3c1a4f613abb0fac6815ab164e719e01bd2341e11729ff931c" Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.312809 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn" Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.394608 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jlw69"] Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.706495 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m"] Nov 11 14:15:04 crc kubenswrapper[4842]: I1111 14:15:04.722717 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381130-ldl4m"] Nov 11 14:15:05 crc kubenswrapper[4842]: I1111 14:15:05.325676 4842 generic.go:334] "Generic (PLEG): container finished" podID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerID="4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9" exitCode=0 Nov 11 14:15:05 crc kubenswrapper[4842]: I1111 14:15:05.325723 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jlw69" event={"ID":"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa","Type":"ContainerDied","Data":"4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9"} Nov 11 14:15:05 crc kubenswrapper[4842]: I1111 14:15:05.325748 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jlw69" event={"ID":"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa","Type":"ContainerStarted","Data":"c8156b73da3e4990d5056d2aa8e927130615adf76240dd4dda159126d1298c47"} Nov 11 14:15:06 crc kubenswrapper[4842]: I1111 14:15:06.079507 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b562af49-ec32-42e1-86ee-48b4d7d9e3e2" path="/var/lib/kubelet/pods/b562af49-ec32-42e1-86ee-48b4d7d9e3e2/volumes" Nov 11 14:15:07 crc kubenswrapper[4842]: I1111 14:15:07.348455 4842 generic.go:334] "Generic (PLEG): container finished" podID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerID="0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f" exitCode=0 Nov 11 14:15:07 crc kubenswrapper[4842]: I1111 14:15:07.348572 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jlw69" event={"ID":"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa","Type":"ContainerDied","Data":"0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f"} Nov 11 14:15:08 crc kubenswrapper[4842]: I1111 14:15:08.362473 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jlw69" event={"ID":"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa","Type":"ContainerStarted","Data":"ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a"} Nov 11 14:15:08 crc kubenswrapper[4842]: I1111 14:15:08.385542 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jlw69" podStartSLOduration=2.7349983399999998 podStartE2EDuration="5.385522546s" podCreationTimestamp="2025-11-11 14:15:03 +0000 UTC" firstStartedPulling="2025-11-11 14:15:05.32961275 +0000 UTC m=+2715.989902369" lastFinishedPulling="2025-11-11 14:15:07.980136916 +0000 UTC m=+2718.640426575" observedRunningTime="2025-11-11 14:15:08.383005816 +0000 UTC m=+2719.043295445" watchObservedRunningTime="2025-11-11 14:15:08.385522546 +0000 UTC m=+2719.045812165" Nov 11 14:15:14 crc kubenswrapper[4842]: I1111 14:15:14.071470 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jlw69" Nov 11 
14:15:14 crc kubenswrapper[4842]: I1111 14:15:14.071883 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:14 crc kubenswrapper[4842]: I1111 14:15:14.120235 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:14 crc kubenswrapper[4842]: I1111 14:15:14.476157 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:14 crc kubenswrapper[4842]: I1111 14:15:14.521053 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jlw69"] Nov 11 14:15:16 crc kubenswrapper[4842]: I1111 14:15:16.436515 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jlw69" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="registry-server" containerID="cri-o://ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a" gracePeriod=2 Nov 11 14:15:16 crc kubenswrapper[4842]: I1111 14:15:16.975608 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.087352 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-catalog-content\") pod \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.087464 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdlfs\" (UniqueName: \"kubernetes.io/projected/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-kube-api-access-pdlfs\") pod \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.087847 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-utilities\") pod \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\" (UID: \"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa\") " Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.088602 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-utilities" (OuterVolumeSpecName: "utilities") pod "f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" (UID: "f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.095527 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-kube-api-access-pdlfs" (OuterVolumeSpecName: "kube-api-access-pdlfs") pod "f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" (UID: "f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa"). InnerVolumeSpecName "kube-api-access-pdlfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.147121 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" (UID: "f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.190208 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.190241 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdlfs\" (UniqueName: \"kubernetes.io/projected/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-kube-api-access-pdlfs\") on node \"crc\" DevicePath \"\"" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.190253 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.449863 4842 generic.go:334] "Generic (PLEG): container finished" podID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerID="ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a" exitCode=0 Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.449937 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jlw69" event={"ID":"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa","Type":"ContainerDied","Data":"ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a"} Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.449950 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jlw69" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.449990 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jlw69" event={"ID":"f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa","Type":"ContainerDied","Data":"c8156b73da3e4990d5056d2aa8e927130615adf76240dd4dda159126d1298c47"} Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.450019 4842 scope.go:117] "RemoveContainer" containerID="ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.486573 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jlw69"] Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.492764 4842 scope.go:117] "RemoveContainer" containerID="0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.496223 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jlw69"] Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.513988 4842 scope.go:117] "RemoveContainer" containerID="4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.557640 4842 scope.go:117] "RemoveContainer" containerID="ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a" Nov 11 14:15:17 crc kubenswrapper[4842]: E1111 14:15:17.558043 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a\": container with ID starting with ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a not found: ID does not exist" containerID="ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.558075 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a"} err="failed to get container status \"ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a\": rpc error: code = NotFound desc = could not find container \"ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a\": container with ID starting with ddbb2cb751880244a37a937531ca6c923ef5bafb8ee184a647deec6a9cbc242a not found: ID does not exist" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.558121 4842 scope.go:117] "RemoveContainer" containerID="0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f" Nov 11 14:15:17 crc kubenswrapper[4842]: E1111 14:15:17.558507 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f\": container with ID starting with 0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f not found: ID does not exist" containerID="0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.558545 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f"} err="failed to get container status \"0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f\": rpc error: code = NotFound desc = could not find 
container \"0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f\": container with ID starting with 0f9a11881ef17a66ef721c8e478c675d3da3272a0c0c990e1383f4df9495647f not found: ID does not exist" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.558570 4842 scope.go:117] "RemoveContainer" containerID="4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9" Nov 11 14:15:17 crc kubenswrapper[4842]: E1111 14:15:17.558820 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9\": container with ID starting with 4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9 not found: ID does not exist" containerID="4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9" Nov 11 14:15:17 crc kubenswrapper[4842]: I1111 14:15:17.558844 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9"} err="failed to get container status \"4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9\": rpc error: code = NotFound desc = could not find container \"4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9\": container with ID starting with 4d43bf568a0df292beed6ccecb5457afb64926932be4b985abd66c26f2ee2ca9 not found: ID does not exist" Nov 11 14:15:18 crc kubenswrapper[4842]: I1111 14:15:18.081424 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" path="/var/lib/kubelet/pods/f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa/volumes" Nov 11 14:16:00 crc kubenswrapper[4842]: I1111 14:16:00.948844 4842 scope.go:117] "RemoveContainer" containerID="487995fccfdf81271810a9cb99791a3e1e9da562d710cebd6c133a2497d423dd" Nov 11 14:17:14 crc kubenswrapper[4842]: I1111 14:17:14.961139 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:17:14 crc kubenswrapper[4842]: I1111 14:17:14.961699 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:17:44 crc kubenswrapper[4842]: I1111 14:17:44.961367 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:17:44 crc kubenswrapper[4842]: I1111 14:17:44.962021 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:18:14 crc kubenswrapper[4842]: I1111 14:18:14.961365 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:18:14 crc kubenswrapper[4842]: I1111 14:18:14.961837 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:18:14 crc kubenswrapper[4842]: I1111 14:18:14.961882 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:18:14 crc kubenswrapper[4842]: I1111 14:18:14.962689 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"93843482ea3a11ffe477de0c0d8491b771069af19c3ea831c888316570f99522"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:18:14 crc kubenswrapper[4842]: I1111 14:18:14.962743 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://93843482ea3a11ffe477de0c0d8491b771069af19c3ea831c888316570f99522" gracePeriod=600 Nov 11 14:18:15 crc kubenswrapper[4842]: I1111 14:18:15.139860 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="93843482ea3a11ffe477de0c0d8491b771069af19c3ea831c888316570f99522" exitCode=0 Nov 11 14:18:15 crc kubenswrapper[4842]: I1111 14:18:15.139905 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"93843482ea3a11ffe477de0c0d8491b771069af19c3ea831c888316570f99522"} Nov 11 14:18:15 crc kubenswrapper[4842]: I1111 14:18:15.139936 4842 scope.go:117] "RemoveContainer" containerID="636a2f5c0c3d60b1dd4a4cb1f2a7c194923cf1b1ba6c4e60b569539784302dde" Nov 11 14:18:16 crc kubenswrapper[4842]: I1111 14:18:16.150703 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649"} Nov 11 14:19:03 crc kubenswrapper[4842]: I1111 14:19:03.062627 4842 generic.go:334] "Generic (PLEG): container finished" podID="28de897b-72a6-4d7b-b7e7-e205a32fe32d" containerID="6009204f2e0da586a3e05db24c985177c79a244b0ace56ecf0d91357c0e36dd4" exitCode=0 Nov 11 14:19:03 crc kubenswrapper[4842]: I1111 14:19:03.062714 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" event={"ID":"28de897b-72a6-4d7b-b7e7-e205a32fe32d","Type":"ContainerDied","Data":"6009204f2e0da586a3e05db24c985177c79a244b0ace56ecf0d91357c0e36dd4"} Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.520317 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.708196 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-inventory\") pod \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.708435 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6g6h\" (UniqueName: \"kubernetes.io/projected/28de897b-72a6-4d7b-b7e7-e205a32fe32d-kube-api-access-b6g6h\") pod \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.708568 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-ssh-key\") pod \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.708658 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-secret-0\") pod \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.709309 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-combined-ca-bundle\") pod \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\" (UID: \"28de897b-72a6-4d7b-b7e7-e205a32fe32d\") " Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.714912 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28de897b-72a6-4d7b-b7e7-e205a32fe32d-kube-api-access-b6g6h" (OuterVolumeSpecName: "kube-api-access-b6g6h") pod "28de897b-72a6-4d7b-b7e7-e205a32fe32d" (UID: "28de897b-72a6-4d7b-b7e7-e205a32fe32d"). InnerVolumeSpecName "kube-api-access-b6g6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.718294 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "28de897b-72a6-4d7b-b7e7-e205a32fe32d" (UID: "28de897b-72a6-4d7b-b7e7-e205a32fe32d"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.739000 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-inventory" (OuterVolumeSpecName: "inventory") pod "28de897b-72a6-4d7b-b7e7-e205a32fe32d" (UID: "28de897b-72a6-4d7b-b7e7-e205a32fe32d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.743431 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "28de897b-72a6-4d7b-b7e7-e205a32fe32d" (UID: "28de897b-72a6-4d7b-b7e7-e205a32fe32d"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.753738 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "28de897b-72a6-4d7b-b7e7-e205a32fe32d" (UID: "28de897b-72a6-4d7b-b7e7-e205a32fe32d"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.812576 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.812610 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6g6h\" (UniqueName: \"kubernetes.io/projected/28de897b-72a6-4d7b-b7e7-e205a32fe32d-kube-api-access-b6g6h\") on node \"crc\" DevicePath \"\"" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.812620 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.812632 4842 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:19:04 crc kubenswrapper[4842]: I1111 14:19:04.812641 4842 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28de897b-72a6-4d7b-b7e7-e205a32fe32d-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.115413 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" event={"ID":"28de897b-72a6-4d7b-b7e7-e205a32fe32d","Type":"ContainerDied","Data":"2ac5f28ae6252f8b06a64c0a25b848cf78c467f95175dc071fa0f6a524ce6d69"} Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.115462 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ac5f28ae6252f8b06a64c0a25b848cf78c467f95175dc071fa0f6a524ce6d69" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.115466 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.212895 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq"] Nov 11 14:19:05 crc kubenswrapper[4842]: E1111 14:19:05.213426 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="extract-utilities" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.213447 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="extract-utilities" Nov 11 14:19:05 crc kubenswrapper[4842]: E1111 14:19:05.213465 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="registry-server" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.213474 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="registry-server" Nov 11 14:19:05 crc kubenswrapper[4842]: E1111 14:19:05.213487 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="extract-content" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.213495 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="extract-content" Nov 11 14:19:05 crc kubenswrapper[4842]: E1111 14:19:05.213546 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28de897b-72a6-4d7b-b7e7-e205a32fe32d" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.213555 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="28de897b-72a6-4d7b-b7e7-e205a32fe32d" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.213790 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="f26272db-cba6-4f37-b0bf-5d3a6dcfa7fa" containerName="registry-server" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.213817 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="28de897b-72a6-4d7b-b7e7-e205a32fe32d" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.214682 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218247 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218299 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218357 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9gmx\" (UniqueName: \"kubernetes.io/projected/908c0c25-452c-4fba-9fbd-d76fa35416af-kube-api-access-q9gmx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218425 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218465 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218498 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218545 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218986 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218562 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: 
\"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.219215 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.219229 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.219295 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.218988 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.219543 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.219631 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.221203 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.234053 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq"] Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321017 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321064 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321138 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321171 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321197 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321220 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321241 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321288 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9gmx\" (UniqueName: \"kubernetes.io/projected/908c0c25-452c-4fba-9fbd-d76fa35416af-kube-api-access-q9gmx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.321349 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.322347 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.325094 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.325526 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-0\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.325535 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.325966 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.326737 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.334646 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.336796 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9gmx\" (UniqueName: \"kubernetes.io/projected/908c0c25-452c-4fba-9fbd-d76fa35416af-kube-api-access-q9gmx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.339382 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-65pxq\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:05 crc kubenswrapper[4842]: I1111 14:19:05.573211 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:19:06 crc kubenswrapper[4842]: I1111 14:19:06.116753 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq"] Nov 11 14:19:06 crc kubenswrapper[4842]: I1111 14:19:06.130958 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:19:07 crc kubenswrapper[4842]: I1111 14:19:07.132907 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" event={"ID":"908c0c25-452c-4fba-9fbd-d76fa35416af","Type":"ContainerStarted","Data":"aa67f9c923bfad62c7cea0ad22cce9320caae6a4153bb5664a215e3d5dc67692"} Nov 11 14:19:07 crc kubenswrapper[4842]: I1111 14:19:07.133182 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" event={"ID":"908c0c25-452c-4fba-9fbd-d76fa35416af","Type":"ContainerStarted","Data":"9a7c1e8d07960d3173b846fcc11ba4c6f28f79e69cdc2b87f6ba4dee155a9893"} Nov 11 14:19:07 crc kubenswrapper[4842]: I1111 14:19:07.156013 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" podStartSLOduration=1.680060181 podStartE2EDuration="2.155993166s" podCreationTimestamp="2025-11-11 14:19:05 +0000 UTC" firstStartedPulling="2025-11-11 14:19:06.130709429 +0000 UTC m=+2956.790999038" lastFinishedPulling="2025-11-11 14:19:06.606642404 +0000 UTC m=+2957.266932023" observedRunningTime="2025-11-11 14:19:07.149210986 +0000 UTC m=+2957.809500615" watchObservedRunningTime="2025-11-11 14:19:07.155993166 +0000 UTC m=+2957.816282785" Nov 11 14:19:57 crc kubenswrapper[4842]: I1111 14:19:57.932320 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6xgv2"] Nov 11 14:19:57 crc kubenswrapper[4842]: I1111 14:19:57.936069 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:57 crc kubenswrapper[4842]: I1111 14:19:57.945687 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6xgv2"] Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.079717 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmsdd\" (UniqueName: \"kubernetes.io/projected/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-kube-api-access-gmsdd\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.079851 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-catalog-content\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.079969 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-utilities\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.182251 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-catalog-content\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.182394 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-utilities\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.182515 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmsdd\" (UniqueName: \"kubernetes.io/projected/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-kube-api-access-gmsdd\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.183593 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-catalog-content\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.183678 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-utilities\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.205898 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gmsdd\" (UniqueName: \"kubernetes.io/projected/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-kube-api-access-gmsdd\") pod \"redhat-marketplace-6xgv2\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.255442 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:19:58 crc kubenswrapper[4842]: I1111 14:19:58.776931 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6xgv2"] Nov 11 14:19:59 crc kubenswrapper[4842]: I1111 14:19:59.622228 4842 generic.go:334] "Generic (PLEG): container finished" podID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerID="9874a312801dd406c7451218bd5e13285f1d122b3ec28a168c62cbb3f4cf3ffe" exitCode=0 Nov 11 14:19:59 crc kubenswrapper[4842]: I1111 14:19:59.622288 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6xgv2" event={"ID":"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a","Type":"ContainerDied","Data":"9874a312801dd406c7451218bd5e13285f1d122b3ec28a168c62cbb3f4cf3ffe"} Nov 11 14:19:59 crc kubenswrapper[4842]: I1111 14:19:59.622570 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6xgv2" event={"ID":"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a","Type":"ContainerStarted","Data":"a828d7b95324d9ebbf00096a7b1578e685a6c9d57b5a1505d51c2f476a28d164"} Nov 11 14:20:00 crc kubenswrapper[4842]: I1111 14:20:00.632463 4842 generic.go:334] "Generic (PLEG): container finished" podID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerID="64085c20c446fdd683a9fdc85676034691e948d21c7d71f5c4c42b193d90a0eb" exitCode=0 Nov 11 14:20:00 crc kubenswrapper[4842]: I1111 14:20:00.632505 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6xgv2" event={"ID":"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a","Type":"ContainerDied","Data":"64085c20c446fdd683a9fdc85676034691e948d21c7d71f5c4c42b193d90a0eb"} Nov 11 14:20:01 crc kubenswrapper[4842]: I1111 14:20:01.646815 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6xgv2" event={"ID":"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a","Type":"ContainerStarted","Data":"bbdcd96bb99a6b84e36c7cee947a9cafc3402a5cc4e79184f5c18bc0bd1b165f"} Nov 11 14:20:01 crc kubenswrapper[4842]: I1111 14:20:01.672667 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6xgv2" podStartSLOduration=3.039002002 podStartE2EDuration="4.672647842s" podCreationTimestamp="2025-11-11 14:19:57 +0000 UTC" firstStartedPulling="2025-11-11 14:19:59.624236644 +0000 UTC m=+3010.284526263" lastFinishedPulling="2025-11-11 14:20:01.257882474 +0000 UTC m=+3011.918172103" observedRunningTime="2025-11-11 14:20:01.669602177 +0000 UTC m=+3012.329891806" watchObservedRunningTime="2025-11-11 14:20:01.672647842 +0000 UTC m=+3012.332937451" Nov 11 14:20:06 crc kubenswrapper[4842]: I1111 14:20:06.989089 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pkkpx"] Nov 11 14:20:06 crc kubenswrapper[4842]: I1111 14:20:06.991415 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.006237 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pkkpx"] Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.075721 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-utilities\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.075873 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-catalog-content\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.075987 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9lbt\" (UniqueName: \"kubernetes.io/projected/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-kube-api-access-t9lbt\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.178076 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-catalog-content\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.178173 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9lbt\" (UniqueName: \"kubernetes.io/projected/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-kube-api-access-t9lbt\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.178357 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-utilities\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.178915 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-utilities\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.178939 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-catalog-content\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.186323 4842 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-k6dh4"] Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.188190 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.199970 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k6dh4"] Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.229116 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9lbt\" (UniqueName: \"kubernetes.io/projected/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-kube-api-access-t9lbt\") pod \"redhat-operators-pkkpx\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.280113 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-utilities\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.280204 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhqfx\" (UniqueName: \"kubernetes.io/projected/32e84cb1-001f-4df1-a832-5475272231ce-kube-api-access-qhqfx\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.280254 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-catalog-content\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.312005 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.382146 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhqfx\" (UniqueName: \"kubernetes.io/projected/32e84cb1-001f-4df1-a832-5475272231ce-kube-api-access-qhqfx\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.382225 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-catalog-content\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.382321 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-utilities\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.382751 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-utilities\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.383355 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-catalog-content\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.413447 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhqfx\" (UniqueName: \"kubernetes.io/projected/32e84cb1-001f-4df1-a832-5475272231ce-kube-api-access-qhqfx\") pod \"certified-operators-k6dh4\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.505398 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:07 crc kubenswrapper[4842]: I1111 14:20:07.874706 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pkkpx"] Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.036030 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k6dh4"] Nov 11 14:20:08 crc kubenswrapper[4842]: W1111 14:20:08.036855 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32e84cb1_001f_4df1_a832_5475272231ce.slice/crio-2bb425d0ce17efa8227f5f56b3472a3f3ef492a70457a217fd1d7f0501d5e88b WatchSource:0}: Error finding container 2bb425d0ce17efa8227f5f56b3472a3f3ef492a70457a217fd1d7f0501d5e88b: Status 404 returned error can't find the container with id 2bb425d0ce17efa8227f5f56b3472a3f3ef492a70457a217fd1d7f0501d5e88b Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.256588 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.256626 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.346863 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.720547 4842 generic.go:334] "Generic (PLEG): container finished" podID="32e84cb1-001f-4df1-a832-5475272231ce" containerID="b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685" exitCode=0 Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.720660 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6dh4" event={"ID":"32e84cb1-001f-4df1-a832-5475272231ce","Type":"ContainerDied","Data":"b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685"} Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.721301 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6dh4" event={"ID":"32e84cb1-001f-4df1-a832-5475272231ce","Type":"ContainerStarted","Data":"2bb425d0ce17efa8227f5f56b3472a3f3ef492a70457a217fd1d7f0501d5e88b"} Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.723564 4842 generic.go:334] "Generic (PLEG): container finished" podID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerID="461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd" exitCode=0 Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.723652 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pkkpx" event={"ID":"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce","Type":"ContainerDied","Data":"461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd"} Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.723689 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pkkpx" event={"ID":"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce","Type":"ContainerStarted","Data":"334a41c549541d3deac8b44e09965b9803326e9d620f78cea17715980c0203e7"} Nov 11 14:20:08 crc kubenswrapper[4842]: I1111 14:20:08.769458 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:20:09 crc kubenswrapper[4842]: 
I1111 14:20:09.580256 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6xgv2"] Nov 11 14:20:09 crc kubenswrapper[4842]: I1111 14:20:09.734180 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pkkpx" event={"ID":"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce","Type":"ContainerStarted","Data":"6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1"} Nov 11 14:20:09 crc kubenswrapper[4842]: I1111 14:20:09.736145 4842 generic.go:334] "Generic (PLEG): container finished" podID="32e84cb1-001f-4df1-a832-5475272231ce" containerID="5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1" exitCode=0 Nov 11 14:20:09 crc kubenswrapper[4842]: I1111 14:20:09.736259 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6dh4" event={"ID":"32e84cb1-001f-4df1-a832-5475272231ce","Type":"ContainerDied","Data":"5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1"} Nov 11 14:20:10 crc kubenswrapper[4842]: I1111 14:20:10.746439 4842 generic.go:334] "Generic (PLEG): container finished" podID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerID="6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1" exitCode=0 Nov 11 14:20:10 crc kubenswrapper[4842]: I1111 14:20:10.746650 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pkkpx" event={"ID":"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce","Type":"ContainerDied","Data":"6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1"} Nov 11 14:20:10 crc kubenswrapper[4842]: I1111 14:20:10.750457 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6xgv2" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="registry-server" containerID="cri-o://bbdcd96bb99a6b84e36c7cee947a9cafc3402a5cc4e79184f5c18bc0bd1b165f" gracePeriod=2 Nov 11 14:20:10 crc kubenswrapper[4842]: I1111 14:20:10.750649 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6dh4" event={"ID":"32e84cb1-001f-4df1-a832-5475272231ce","Type":"ContainerStarted","Data":"40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c"} Nov 11 14:20:10 crc kubenswrapper[4842]: I1111 14:20:10.803159 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k6dh4" podStartSLOduration=2.177308758 podStartE2EDuration="3.803143036s" podCreationTimestamp="2025-11-11 14:20:07 +0000 UTC" firstStartedPulling="2025-11-11 14:20:08.722508328 +0000 UTC m=+3019.382797947" lastFinishedPulling="2025-11-11 14:20:10.348342606 +0000 UTC m=+3021.008632225" observedRunningTime="2025-11-11 14:20:10.802612189 +0000 UTC m=+3021.462901808" watchObservedRunningTime="2025-11-11 14:20:10.803143036 +0000 UTC m=+3021.463432655" Nov 11 14:20:11 crc kubenswrapper[4842]: I1111 14:20:11.761765 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pkkpx" event={"ID":"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce","Type":"ContainerStarted","Data":"17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7"} Nov 11 14:20:11 crc kubenswrapper[4842]: I1111 14:20:11.780965 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pkkpx" podStartSLOduration=3.107049507 podStartE2EDuration="5.780950219s" podCreationTimestamp="2025-11-11 14:20:06 +0000 
UTC" firstStartedPulling="2025-11-11 14:20:08.725079938 +0000 UTC m=+3019.385369567" lastFinishedPulling="2025-11-11 14:20:11.39898066 +0000 UTC m=+3022.059270279" observedRunningTime="2025-11-11 14:20:11.778729031 +0000 UTC m=+3022.439018670" watchObservedRunningTime="2025-11-11 14:20:11.780950219 +0000 UTC m=+3022.441239838" Nov 11 14:20:12 crc kubenswrapper[4842]: I1111 14:20:12.775702 4842 generic.go:334] "Generic (PLEG): container finished" podID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerID="bbdcd96bb99a6b84e36c7cee947a9cafc3402a5cc4e79184f5c18bc0bd1b165f" exitCode=0 Nov 11 14:20:12 crc kubenswrapper[4842]: I1111 14:20:12.777257 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6xgv2" event={"ID":"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a","Type":"ContainerDied","Data":"bbdcd96bb99a6b84e36c7cee947a9cafc3402a5cc4e79184f5c18bc0bd1b165f"} Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.029011 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.199952 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-utilities\") pod \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.200089 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-catalog-content\") pod \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.200184 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmsdd\" (UniqueName: \"kubernetes.io/projected/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-kube-api-access-gmsdd\") pod \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\" (UID: \"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a\") " Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.200878 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-utilities" (OuterVolumeSpecName: "utilities") pod "71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" (UID: "71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.209385 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-kube-api-access-gmsdd" (OuterVolumeSpecName: "kube-api-access-gmsdd") pod "71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" (UID: "71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a"). InnerVolumeSpecName "kube-api-access-gmsdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.214863 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" (UID: "71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.302622 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.302672 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmsdd\" (UniqueName: \"kubernetes.io/projected/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-kube-api-access-gmsdd\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.302686 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.790926 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6xgv2" event={"ID":"71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a","Type":"ContainerDied","Data":"a828d7b95324d9ebbf00096a7b1578e685a6c9d57b5a1505d51c2f476a28d164"} Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.791005 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6xgv2" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.791007 4842 scope.go:117] "RemoveContainer" containerID="bbdcd96bb99a6b84e36c7cee947a9cafc3402a5cc4e79184f5c18bc0bd1b165f" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.812326 4842 scope.go:117] "RemoveContainer" containerID="64085c20c446fdd683a9fdc85676034691e948d21c7d71f5c4c42b193d90a0eb" Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.834344 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6xgv2"] Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.843963 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6xgv2"] Nov 11 14:20:13 crc kubenswrapper[4842]: I1111 14:20:13.849816 4842 scope.go:117] "RemoveContainer" containerID="9874a312801dd406c7451218bd5e13285f1d122b3ec28a168c62cbb3f4cf3ffe" Nov 11 14:20:14 crc kubenswrapper[4842]: I1111 14:20:14.071674 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" path="/var/lib/kubelet/pods/71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a/volumes" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.313157 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.313527 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.355212 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.507535 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.507872 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.561661 4842 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.878507 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:17 crc kubenswrapper[4842]: I1111 14:20:17.879398 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:19 crc kubenswrapper[4842]: I1111 14:20:19.985577 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pkkpx"] Nov 11 14:20:19 crc kubenswrapper[4842]: I1111 14:20:19.986231 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pkkpx" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="registry-server" containerID="cri-o://17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7" gracePeriod=2 Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.185502 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k6dh4"] Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.186137 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k6dh4" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="registry-server" containerID="cri-o://40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c" gracePeriod=2 Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.484807 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.546270 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9lbt\" (UniqueName: \"kubernetes.io/projected/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-kube-api-access-t9lbt\") pod \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.553155 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-kube-api-access-t9lbt" (OuterVolumeSpecName: "kube-api-access-t9lbt") pod "792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" (UID: "792958cf-77ab-4b8f-a6b2-32ad02e0d3ce"). InnerVolumeSpecName "kube-api-access-t9lbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.599347 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.648036 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-utilities\") pod \"32e84cb1-001f-4df1-a832-5475272231ce\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.648290 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhqfx\" (UniqueName: \"kubernetes.io/projected/32e84cb1-001f-4df1-a832-5475272231ce-kube-api-access-qhqfx\") pod \"32e84cb1-001f-4df1-a832-5475272231ce\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.648375 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-catalog-content\") pod \"32e84cb1-001f-4df1-a832-5475272231ce\" (UID: \"32e84cb1-001f-4df1-a832-5475272231ce\") " Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.648440 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-utilities\") pod \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.648553 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-catalog-content\") pod \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\" (UID: \"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce\") " Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.648939 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-utilities" (OuterVolumeSpecName: "utilities") pod "32e84cb1-001f-4df1-a832-5475272231ce" (UID: "32e84cb1-001f-4df1-a832-5475272231ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.648994 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9lbt\" (UniqueName: \"kubernetes.io/projected/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-kube-api-access-t9lbt\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.649544 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-utilities" (OuterVolumeSpecName: "utilities") pod "792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" (UID: "792958cf-77ab-4b8f-a6b2-32ad02e0d3ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.651510 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32e84cb1-001f-4df1-a832-5475272231ce-kube-api-access-qhqfx" (OuterVolumeSpecName: "kube-api-access-qhqfx") pod "32e84cb1-001f-4df1-a832-5475272231ce" (UID: "32e84cb1-001f-4df1-a832-5475272231ce"). InnerVolumeSpecName "kube-api-access-qhqfx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.691805 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "32e84cb1-001f-4df1-a832-5475272231ce" (UID: "32e84cb1-001f-4df1-a832-5475272231ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.740364 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" (UID: "792958cf-77ab-4b8f-a6b2-32ad02e0d3ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.750518 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.750555 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.750564 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhqfx\" (UniqueName: \"kubernetes.io/projected/32e84cb1-001f-4df1-a832-5475272231ce-kube-api-access-qhqfx\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.750573 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32e84cb1-001f-4df1-a832-5475272231ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.750581 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.856857 4842 generic.go:334] "Generic (PLEG): container finished" podID="32e84cb1-001f-4df1-a832-5475272231ce" containerID="40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c" exitCode=0 Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.856996 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k6dh4" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.856995 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6dh4" event={"ID":"32e84cb1-001f-4df1-a832-5475272231ce","Type":"ContainerDied","Data":"40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c"} Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.857533 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6dh4" event={"ID":"32e84cb1-001f-4df1-a832-5475272231ce","Type":"ContainerDied","Data":"2bb425d0ce17efa8227f5f56b3472a3f3ef492a70457a217fd1d7f0501d5e88b"} Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.857561 4842 scope.go:117] "RemoveContainer" containerID="40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.859747 4842 generic.go:334] "Generic (PLEG): container finished" podID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerID="17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7" exitCode=0 Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.859790 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pkkpx" event={"ID":"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce","Type":"ContainerDied","Data":"17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7"} Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.859823 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pkkpx" event={"ID":"792958cf-77ab-4b8f-a6b2-32ad02e0d3ce","Type":"ContainerDied","Data":"334a41c549541d3deac8b44e09965b9803326e9d620f78cea17715980c0203e7"} Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.859795 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pkkpx" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.878175 4842 scope.go:117] "RemoveContainer" containerID="5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.901746 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k6dh4"] Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.910014 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k6dh4"] Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.913711 4842 scope.go:117] "RemoveContainer" containerID="b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.917253 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pkkpx"] Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.924044 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pkkpx"] Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.931385 4842 scope.go:117] "RemoveContainer" containerID="40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c" Nov 11 14:20:20 crc kubenswrapper[4842]: E1111 14:20:20.931745 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c\": container with ID starting with 40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c not found: ID does not exist" containerID="40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.931791 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c"} err="failed to get container status \"40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c\": rpc error: code = NotFound desc = could not find container \"40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c\": container with ID starting with 40284648619cbe809040860eaab67a7b750804539f4444eab43bf76d41e7e31c not found: ID does not exist" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.931816 4842 scope.go:117] "RemoveContainer" containerID="5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1" Nov 11 14:20:20 crc kubenswrapper[4842]: E1111 14:20:20.932076 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1\": container with ID starting with 5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1 not found: ID does not exist" containerID="5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.932110 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1"} err="failed to get container status \"5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1\": rpc error: code = NotFound desc = could not find container \"5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1\": container with ID starting with 
5a54968504e82df2640c547244838c751d151fd24ba26fa84908490fdf0ae9a1 not found: ID does not exist" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.932122 4842 scope.go:117] "RemoveContainer" containerID="b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685" Nov 11 14:20:20 crc kubenswrapper[4842]: E1111 14:20:20.932405 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685\": container with ID starting with b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685 not found: ID does not exist" containerID="b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.932424 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685"} err="failed to get container status \"b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685\": rpc error: code = NotFound desc = could not find container \"b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685\": container with ID starting with b9f480389eebcc83ab882d268973d3861e23d80d0a76711bde8ba30d7834f685 not found: ID does not exist" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.932436 4842 scope.go:117] "RemoveContainer" containerID="17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.951145 4842 scope.go:117] "RemoveContainer" containerID="6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.980253 4842 scope.go:117] "RemoveContainer" containerID="461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.996227 4842 scope.go:117] "RemoveContainer" containerID="17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7" Nov 11 14:20:20 crc kubenswrapper[4842]: E1111 14:20:20.996684 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7\": container with ID starting with 17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7 not found: ID does not exist" containerID="17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.996718 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7"} err="failed to get container status \"17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7\": rpc error: code = NotFound desc = could not find container \"17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7\": container with ID starting with 17a67cb7dd6222b8755fa135d06aa491b964a1d5e861edc414b38b975e0108d7 not found: ID does not exist" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.996742 4842 scope.go:117] "RemoveContainer" containerID="6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1" Nov 11 14:20:20 crc kubenswrapper[4842]: E1111 14:20:20.997685 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1\": container 
with ID starting with 6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1 not found: ID does not exist" containerID="6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.997717 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1"} err="failed to get container status \"6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1\": rpc error: code = NotFound desc = could not find container \"6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1\": container with ID starting with 6a145274aa3f4e1d3c34fbdd1610d31e7282d101d6a26c7e49e46053c30c36d1 not found: ID does not exist" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.997737 4842 scope.go:117] "RemoveContainer" containerID="461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd" Nov 11 14:20:20 crc kubenswrapper[4842]: E1111 14:20:20.997943 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd\": container with ID starting with 461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd not found: ID does not exist" containerID="461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd" Nov 11 14:20:20 crc kubenswrapper[4842]: I1111 14:20:20.997976 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd"} err="failed to get container status \"461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd\": rpc error: code = NotFound desc = could not find container \"461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd\": container with ID starting with 461f1d553fdfcbb697892e696fbe083428f8e6e044db8abb5f07df09285fdcbd not found: ID does not exist" Nov 11 14:20:22 crc kubenswrapper[4842]: I1111 14:20:22.070106 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32e84cb1-001f-4df1-a832-5475272231ce" path="/var/lib/kubelet/pods/32e84cb1-001f-4df1-a832-5475272231ce/volumes" Nov 11 14:20:22 crc kubenswrapper[4842]: I1111 14:20:22.071041 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" path="/var/lib/kubelet/pods/792958cf-77ab-4b8f-a6b2-32ad02e0d3ce/volumes" Nov 11 14:20:44 crc kubenswrapper[4842]: I1111 14:20:44.960994 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:20:44 crc kubenswrapper[4842]: I1111 14:20:44.961482 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:21:14 crc kubenswrapper[4842]: I1111 14:21:14.961062 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:21:14 crc kubenswrapper[4842]: I1111 14:21:14.961679 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:21:44 crc kubenswrapper[4842]: I1111 14:21:44.961221 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:21:44 crc kubenswrapper[4842]: I1111 14:21:44.961696 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:21:44 crc kubenswrapper[4842]: I1111 14:21:44.961735 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:21:44 crc kubenswrapper[4842]: I1111 14:21:44.962417 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:21:44 crc kubenswrapper[4842]: I1111 14:21:44.962467 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" gracePeriod=600 Nov 11 14:21:45 crc kubenswrapper[4842]: E1111 14:21:45.084906 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:21:45 crc kubenswrapper[4842]: I1111 14:21:45.597793 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" exitCode=0 Nov 11 14:21:45 crc kubenswrapper[4842]: I1111 14:21:45.597840 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649"} Nov 11 14:21:45 crc kubenswrapper[4842]: I1111 14:21:45.597878 4842 scope.go:117] "RemoveContainer" containerID="93843482ea3a11ffe477de0c0d8491b771069af19c3ea831c888316570f99522" Nov 11 
14:21:45 crc kubenswrapper[4842]: I1111 14:21:45.598675 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:21:45 crc kubenswrapper[4842]: E1111 14:21:45.599043 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:22:01 crc kubenswrapper[4842]: I1111 14:22:01.060244 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:22:01 crc kubenswrapper[4842]: E1111 14:22:01.062300 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:22:13 crc kubenswrapper[4842]: I1111 14:22:13.882938 4842 generic.go:334] "Generic (PLEG): container finished" podID="908c0c25-452c-4fba-9fbd-d76fa35416af" containerID="aa67f9c923bfad62c7cea0ad22cce9320caae6a4153bb5664a215e3d5dc67692" exitCode=0 Nov 11 14:22:13 crc kubenswrapper[4842]: I1111 14:22:13.883202 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" event={"ID":"908c0c25-452c-4fba-9fbd-d76fa35416af","Type":"ContainerDied","Data":"aa67f9c923bfad62c7cea0ad22cce9320caae6a4153bb5664a215e3d5dc67692"} Nov 11 14:22:14 crc kubenswrapper[4842]: I1111 14:22:14.059506 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:22:14 crc kubenswrapper[4842]: E1111 14:22:14.060030 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.310638 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.444926 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-combined-ca-bundle\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.444978 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-ssh-key\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.445026 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-extra-config-0\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.445059 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-inventory\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.445182 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-1\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.445213 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-0\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.445255 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-1\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.445287 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9gmx\" (UniqueName: \"kubernetes.io/projected/908c0c25-452c-4fba-9fbd-d76fa35416af-kube-api-access-q9gmx\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.445379 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-0\") pod \"908c0c25-452c-4fba-9fbd-d76fa35416af\" (UID: \"908c0c25-452c-4fba-9fbd-d76fa35416af\") " Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.450703 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/908c0c25-452c-4fba-9fbd-d76fa35416af-kube-api-access-q9gmx" (OuterVolumeSpecName: "kube-api-access-q9gmx") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "kube-api-access-q9gmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.460977 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.474584 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-inventory" (OuterVolumeSpecName: "inventory") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.477579 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.479347 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.483369 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.485247 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.485783 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.487384 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "908c0c25-452c-4fba-9fbd-d76fa35416af" (UID: "908c0c25-452c-4fba-9fbd-d76fa35416af"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547351 4842 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547382 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547392 4842 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547401 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547410 4842 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547418 4842 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547426 4842 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547434 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9gmx\" (UniqueName: \"kubernetes.io/projected/908c0c25-452c-4fba-9fbd-d76fa35416af-kube-api-access-q9gmx\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.547441 4842 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/908c0c25-452c-4fba-9fbd-d76fa35416af-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.904784 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" event={"ID":"908c0c25-452c-4fba-9fbd-d76fa35416af","Type":"ContainerDied","Data":"9a7c1e8d07960d3173b846fcc11ba4c6f28f79e69cdc2b87f6ba4dee155a9893"} Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.904826 4842 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="9a7c1e8d07960d3173b846fcc11ba4c6f28f79e69cdc2b87f6ba4dee155a9893" Nov 11 14:22:15 crc kubenswrapper[4842]: I1111 14:22:15.904830 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-65pxq" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.010971 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c"] Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011358 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="extract-utilities" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011378 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="extract-utilities" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011400 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011407 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011423 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011430 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011438 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="908c0c25-452c-4fba-9fbd-d76fa35416af" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011444 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="908c0c25-452c-4fba-9fbd-d76fa35416af" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011453 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="extract-content" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011458 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="extract-content" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011474 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="extract-utilities" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011481 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="extract-utilities" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011493 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011498 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011508 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="extract-content" Nov 11 14:22:16 crc kubenswrapper[4842]: 
I1111 14:22:16.011513 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="extract-content" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011521 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="extract-content" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011526 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="extract-content" Nov 11 14:22:16 crc kubenswrapper[4842]: E1111 14:22:16.011535 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="extract-utilities" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011541 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="extract-utilities" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011727 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="908c0c25-452c-4fba-9fbd-d76fa35416af" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011741 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="792958cf-77ab-4b8f-a6b2-32ad02e0d3ce" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011751 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e84cb1-001f-4df1-a832-5475272231ce" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.011760 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="71ec1fe3-2ba7-4e3d-9db2-54fc1f60ee0a" containerName="registry-server" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.012446 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.025583 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.025761 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c"] Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.025847 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.025991 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.026065 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5pv8z" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.026602 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.161537 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj6gp\" (UniqueName: \"kubernetes.io/projected/c203745d-d249-4515-ac25-d99b78d65d2e-kube-api-access-qj6gp\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.161954 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.162023 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.162285 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.162331 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc 
kubenswrapper[4842]: I1111 14:22:16.162381 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.162460 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.264780 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj6gp\" (UniqueName: \"kubernetes.io/projected/c203745d-d249-4515-ac25-d99b78d65d2e-kube-api-access-qj6gp\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.264840 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.264917 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.264974 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.264994 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.265018 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: 
\"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.265047 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.269900 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.270066 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.270732 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.271762 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.273971 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.274005 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.290732 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj6gp\" (UniqueName: \"kubernetes.io/projected/c203745d-d249-4515-ac25-d99b78d65d2e-kube-api-access-qj6gp\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c\" (UID: 
\"c203745d-d249-4515-ac25-d99b78d65d2e\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.362379 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:22:16 crc kubenswrapper[4842]: I1111 14:22:16.930557 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c"] Nov 11 14:22:17 crc kubenswrapper[4842]: I1111 14:22:17.922890 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" event={"ID":"c203745d-d249-4515-ac25-d99b78d65d2e","Type":"ContainerStarted","Data":"4cf7695e59031de0ad37088d7704b7f1ea1e3d2d90bc52d80e0a08e2668109f0"} Nov 11 14:22:17 crc kubenswrapper[4842]: I1111 14:22:17.923411 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" event={"ID":"c203745d-d249-4515-ac25-d99b78d65d2e","Type":"ContainerStarted","Data":"28a96c5175475742ea0f4253365c310bf9ce41d4be43e1b6da7fdd2f5c6e55c5"} Nov 11 14:22:17 crc kubenswrapper[4842]: I1111 14:22:17.941512 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" podStartSLOduration=2.443747035 podStartE2EDuration="2.941489307s" podCreationTimestamp="2025-11-11 14:22:15 +0000 UTC" firstStartedPulling="2025-11-11 14:22:16.937131915 +0000 UTC m=+3147.597421534" lastFinishedPulling="2025-11-11 14:22:17.434874187 +0000 UTC m=+3148.095163806" observedRunningTime="2025-11-11 14:22:17.935925752 +0000 UTC m=+3148.596215381" watchObservedRunningTime="2025-11-11 14:22:17.941489307 +0000 UTC m=+3148.601778926" Nov 11 14:22:29 crc kubenswrapper[4842]: I1111 14:22:29.059385 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:22:29 crc kubenswrapper[4842]: E1111 14:22:29.060166 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:22:43 crc kubenswrapper[4842]: I1111 14:22:43.058875 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:22:43 crc kubenswrapper[4842]: E1111 14:22:43.060430 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:22:57 crc kubenswrapper[4842]: I1111 14:22:57.058805 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:22:57 crc kubenswrapper[4842]: E1111 14:22:57.060079 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:23:10 crc kubenswrapper[4842]: I1111 14:23:10.068225 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:23:10 crc kubenswrapper[4842]: E1111 14:23:10.069345 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:23:23 crc kubenswrapper[4842]: I1111 14:23:23.059562 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:23:23 crc kubenswrapper[4842]: E1111 14:23:23.060174 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:23:37 crc kubenswrapper[4842]: I1111 14:23:37.059017 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:23:37 crc kubenswrapper[4842]: E1111 14:23:37.060037 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:23:48 crc kubenswrapper[4842]: I1111 14:23:48.060150 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:23:48 crc kubenswrapper[4842]: E1111 14:23:48.061574 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:24:02 crc kubenswrapper[4842]: I1111 14:24:02.059395 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:24:02 crc kubenswrapper[4842]: E1111 14:24:02.060219 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:24:13 crc kubenswrapper[4842]: I1111 14:24:13.059073 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:24:13 crc kubenswrapper[4842]: E1111 14:24:13.059791 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:24:26 crc kubenswrapper[4842]: I1111 14:24:26.061346 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:24:26 crc kubenswrapper[4842]: E1111 14:24:26.062349 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:24:40 crc kubenswrapper[4842]: I1111 14:24:40.066915 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:24:40 crc kubenswrapper[4842]: E1111 14:24:40.068022 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:24:40 crc kubenswrapper[4842]: I1111 14:24:40.258988 4842 generic.go:334] "Generic (PLEG): container finished" podID="c203745d-d249-4515-ac25-d99b78d65d2e" containerID="4cf7695e59031de0ad37088d7704b7f1ea1e3d2d90bc52d80e0a08e2668109f0" exitCode=0 Nov 11 14:24:40 crc kubenswrapper[4842]: I1111 14:24:40.259084 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" event={"ID":"c203745d-d249-4515-ac25-d99b78d65d2e","Type":"ContainerDied","Data":"4cf7695e59031de0ad37088d7704b7f1ea1e3d2d90bc52d80e0a08e2668109f0"} Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.727201 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.918278 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-0\") pod \"c203745d-d249-4515-ac25-d99b78d65d2e\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.918351 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-2\") pod \"c203745d-d249-4515-ac25-d99b78d65d2e\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.919746 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj6gp\" (UniqueName: \"kubernetes.io/projected/c203745d-d249-4515-ac25-d99b78d65d2e-kube-api-access-qj6gp\") pod \"c203745d-d249-4515-ac25-d99b78d65d2e\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.920005 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-inventory\") pod \"c203745d-d249-4515-ac25-d99b78d65d2e\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.920160 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-telemetry-combined-ca-bundle\") pod \"c203745d-d249-4515-ac25-d99b78d65d2e\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.920253 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ssh-key\") pod \"c203745d-d249-4515-ac25-d99b78d65d2e\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.920349 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-1\") pod \"c203745d-d249-4515-ac25-d99b78d65d2e\" (UID: \"c203745d-d249-4515-ac25-d99b78d65d2e\") " Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.924380 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c203745d-d249-4515-ac25-d99b78d65d2e-kube-api-access-qj6gp" (OuterVolumeSpecName: "kube-api-access-qj6gp") pod "c203745d-d249-4515-ac25-d99b78d65d2e" (UID: "c203745d-d249-4515-ac25-d99b78d65d2e"). InnerVolumeSpecName "kube-api-access-qj6gp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.934007 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "c203745d-d249-4515-ac25-d99b78d65d2e" (UID: "c203745d-d249-4515-ac25-d99b78d65d2e"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.951071 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "c203745d-d249-4515-ac25-d99b78d65d2e" (UID: "c203745d-d249-4515-ac25-d99b78d65d2e"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.953691 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c203745d-d249-4515-ac25-d99b78d65d2e" (UID: "c203745d-d249-4515-ac25-d99b78d65d2e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.955383 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "c203745d-d249-4515-ac25-d99b78d65d2e" (UID: "c203745d-d249-4515-ac25-d99b78d65d2e"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.961171 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-inventory" (OuterVolumeSpecName: "inventory") pod "c203745d-d249-4515-ac25-d99b78d65d2e" (UID: "c203745d-d249-4515-ac25-d99b78d65d2e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:24:41 crc kubenswrapper[4842]: I1111 14:24:41.967169 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "c203745d-d249-4515-ac25-d99b78d65d2e" (UID: "c203745d-d249-4515-ac25-d99b78d65d2e"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.023342 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.023389 4842 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.023403 4842 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.023414 4842 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.023431 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj6gp\" (UniqueName: \"kubernetes.io/projected/c203745d-d249-4515-ac25-d99b78d65d2e-kube-api-access-qj6gp\") on node \"crc\" DevicePath \"\"" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.023440 4842 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-inventory\") on node \"crc\" DevicePath \"\"" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.023448 4842 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c203745d-d249-4515-ac25-d99b78d65d2e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.280555 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" event={"ID":"c203745d-d249-4515-ac25-d99b78d65d2e","Type":"ContainerDied","Data":"28a96c5175475742ea0f4253365c310bf9ce41d4be43e1b6da7fdd2f5c6e55c5"} Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.280610 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28a96c5175475742ea0f4253365c310bf9ce41d4be43e1b6da7fdd2f5c6e55c5" Nov 11 14:24:42 crc kubenswrapper[4842]: I1111 14:24:42.280633 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c" Nov 11 14:24:53 crc kubenswrapper[4842]: I1111 14:24:53.059833 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:24:53 crc kubenswrapper[4842]: E1111 14:24:53.060683 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:25:07 crc kubenswrapper[4842]: I1111 14:25:07.060434 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:25:07 crc kubenswrapper[4842]: E1111 14:25:07.061396 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.327021 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 11 14:25:17 crc kubenswrapper[4842]: E1111 14:25:17.328170 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c203745d-d249-4515-ac25-d99b78d65d2e" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.328185 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c203745d-d249-4515-ac25-d99b78d65d2e" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.328363 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c203745d-d249-4515-ac25-d99b78d65d2e" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.338510 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.341055 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.367557 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.450236 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.466808 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.469838 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-nvme\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.469965 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-config-data-custom\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470060 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqbd5\" (UniqueName: \"kubernetes.io/projected/07ee075d-6090-4f91-9908-223be5beff86-kube-api-access-fqbd5\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470115 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470153 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-run\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470224 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-config-data\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470303 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " 
pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470377 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-sys\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470427 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.470487 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-lib-modules\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.471013 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.471133 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.476493 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-config-data" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.478671 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.478772 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-scripts\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.478901 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-dev\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.479044 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.498043 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.499867 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.503044 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-nfs-2-config-data" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.510590 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580763 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-config-data-custom\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580806 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqbd5\" (UniqueName: \"kubernetes.io/projected/07ee075d-6090-4f91-9908-223be5beff86-kube-api-access-fqbd5\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580830 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580854 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580877 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-run\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580908 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-config-data\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580939 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580957 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580984 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.580999 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-sys\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581026 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581052 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581082 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-lib-modules\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581131 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581151 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-run\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581172 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-scripts\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581191 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-sys\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581211 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-dev\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581231 4842 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9cm7\" (UniqueName: \"kubernetes.io/projected/3c06a622-086f-4df5-beaa-67d62802c249-kube-api-access-p9cm7\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581257 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581252 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-sys\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581276 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581298 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581324 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581337 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581361 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581382 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581400 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-nvme\") pod 
\"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581414 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-dev\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581431 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.581448 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.582197 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.582246 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-run\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.582269 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.582311 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-lib-modules\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.582404 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.582805 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-dev\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.582947 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.583122 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.583900 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/07ee075d-6090-4f91-9908-223be5beff86-etc-nvme\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.588710 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-scripts\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.589125 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.601881 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-config-data-custom\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.602150 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07ee075d-6090-4f91-9908-223be5beff86-config-data\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.635125 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqbd5\" (UniqueName: \"kubernetes.io/projected/07ee075d-6090-4f91-9908-223be5beff86-kube-api-access-fqbd5\") pod \"cinder-backup-0\" (UID: \"07ee075d-6090-4f91-9908-223be5beff86\") " pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683264 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683554 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g8bw\" (UniqueName: \"kubernetes.io/projected/51936d85-49d4-4413-b8f0-0c582381a663-kube-api-access-9g8bw\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683585 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-lib-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683603 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683628 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683643 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-dev\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683659 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683675 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683700 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683724 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683762 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683780 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-scripts\") pod 
\"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683812 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683834 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683855 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683878 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683904 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683922 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683956 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683974 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-run\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.683987 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684007 4842 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684028 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-sys\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684041 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684069 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684083 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684113 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9cm7\" (UniqueName: \"kubernetes.io/projected/3c06a622-086f-4df5-beaa-67d62802c249-kube-api-access-p9cm7\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684143 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684218 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684247 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-run\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684347 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-lib-cinder\") pod \"cinder-volume-nfs-0\" 
(UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684350 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-nvme\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684383 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-lib-modules\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684273 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684485 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-locks-brick\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684501 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-dev\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684522 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-var-locks-cinder\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.684536 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-machine-id\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.685205 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-etc-iscsi\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.685608 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3c06a622-086f-4df5-beaa-67d62802c249-sys\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.688985 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.692699 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-scripts\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.693162 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-config-data\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.693690 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-config-data-custom\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.710323 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c06a622-086f-4df5-beaa-67d62802c249-combined-ca-bundle\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.712740 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9cm7\" (UniqueName: \"kubernetes.io/projected/3c06a622-086f-4df5-beaa-67d62802c249-kube-api-access-p9cm7\") pod \"cinder-volume-nfs-0\" (UID: \"3c06a622-086f-4df5-beaa-67d62802c249\") " pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786384 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786430 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786450 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786473 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786500 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: 
\"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786513 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786541 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786571 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786593 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g8bw\" (UniqueName: \"kubernetes.io/projected/51936d85-49d4-4413-b8f0-0c582381a663-kube-api-access-9g8bw\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786640 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786684 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786699 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786717 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786732 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-iscsi\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 
14:25:17.786769 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786744 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-dev\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786774 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-nvme\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786792 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-etc-machine-id\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786813 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-run\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786825 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-locks-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786822 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-lib-modules\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786825 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.786881 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-sys\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.787224 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-locks-brick\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 
14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.787488 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/51936d85-49d4-4413-b8f0-0c582381a663-var-lib-cinder\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.790758 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-config-data-custom\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.791023 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-config-data\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.791194 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-combined-ca-bundle\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.792392 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/51936d85-49d4-4413-b8f0-0c582381a663-scripts\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.806799 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.810902 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g8bw\" (UniqueName: \"kubernetes.io/projected/51936d85-49d4-4413-b8f0-0c582381a663-kube-api-access-9g8bw\") pod \"cinder-volume-nfs-2-0\" (UID: \"51936d85-49d4-4413-b8f0-0c582381a663\") " pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:17 crc kubenswrapper[4842]: I1111 14:25:17.832749 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:18 crc kubenswrapper[4842]: I1111 14:25:18.332999 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 11 14:25:18 crc kubenswrapper[4842]: W1111 14:25:18.342258 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07ee075d_6090_4f91_9908_223be5beff86.slice/crio-30676e06ab31572b87618d732e8794bd7de8011016f4e97ab955f80f84972d8f WatchSource:0}: Error finding container 30676e06ab31572b87618d732e8794bd7de8011016f4e97ab955f80f84972d8f: Status 404 returned error can't find the container with id 30676e06ab31572b87618d732e8794bd7de8011016f4e97ab955f80f84972d8f Nov 11 14:25:18 crc kubenswrapper[4842]: I1111 14:25:18.346502 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:25:18 crc kubenswrapper[4842]: I1111 14:25:18.463552 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-2-0"] Nov 11 14:25:18 crc kubenswrapper[4842]: W1111 14:25:18.507813 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51936d85_49d4_4413_b8f0_0c582381a663.slice/crio-189a831bb995965797fe829ff3b0c8037c66af6ce56a7956369b66e3c8270478 WatchSource:0}: Error finding container 189a831bb995965797fe829ff3b0c8037c66af6ce56a7956369b66e3c8270478: Status 404 returned error can't find the container with id 189a831bb995965797fe829ff3b0c8037c66af6ce56a7956369b66e3c8270478 Nov 11 14:25:18 crc kubenswrapper[4842]: I1111 14:25:18.545395 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-nfs-0"] Nov 11 14:25:18 crc kubenswrapper[4842]: W1111 14:25:18.563033 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c06a622_086f_4df5_beaa_67d62802c249.slice/crio-96d158451b4b8a93d0b38642b3609215de9bfdb8dc399342bb4d41a3e4d514b9 WatchSource:0}: Error finding container 96d158451b4b8a93d0b38642b3609215de9bfdb8dc399342bb4d41a3e4d514b9: Status 404 returned error can't find the container with id 96d158451b4b8a93d0b38642b3609215de9bfdb8dc399342bb4d41a3e4d514b9 Nov 11 14:25:18 crc kubenswrapper[4842]: I1111 14:25:18.658303 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"51936d85-49d4-4413-b8f0-0c582381a663","Type":"ContainerStarted","Data":"189a831bb995965797fe829ff3b0c8037c66af6ce56a7956369b66e3c8270478"} Nov 11 14:25:18 crc kubenswrapper[4842]: I1111 14:25:18.659812 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"3c06a622-086f-4df5-beaa-67d62802c249","Type":"ContainerStarted","Data":"96d158451b4b8a93d0b38642b3609215de9bfdb8dc399342bb4d41a3e4d514b9"} Nov 11 14:25:18 crc kubenswrapper[4842]: I1111 14:25:18.661286 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"07ee075d-6090-4f91-9908-223be5beff86","Type":"ContainerStarted","Data":"30676e06ab31572b87618d732e8794bd7de8011016f4e97ab955f80f84972d8f"} Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.673578 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"51936d85-49d4-4413-b8f0-0c582381a663","Type":"ContainerStarted","Data":"406f44f7501a9b67d62b09d57dac7133f7a9241043a15b319db78e47d4e9f32b"} Nov 11 14:25:19 
crc kubenswrapper[4842]: I1111 14:25:19.674246 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-2-0" event={"ID":"51936d85-49d4-4413-b8f0-0c582381a663","Type":"ContainerStarted","Data":"53fba28400ab6e1fc2107edc1d09d0a6a0095c3296ab77567dbcf5e7ba06fce2"} Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.676090 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"3c06a622-086f-4df5-beaa-67d62802c249","Type":"ContainerStarted","Data":"08c18419963c9abb2892619f619825ca572a276b9ac8ad6945f7456198b512b7"} Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.676139 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-nfs-0" event={"ID":"3c06a622-086f-4df5-beaa-67d62802c249","Type":"ContainerStarted","Data":"b9a32b8af966020b2770b14c91fd1ccc19ae560fe9b2aa131f1b439e6b8a1f28"} Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.678516 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"07ee075d-6090-4f91-9908-223be5beff86","Type":"ContainerStarted","Data":"af35a460b819ca4b86e5c0f19bcaef0d66f42d7a45a4b1a6ad8eecc8926f6208"} Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.678558 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"07ee075d-6090-4f91-9908-223be5beff86","Type":"ContainerStarted","Data":"ac008dc0d1d063489471d5ac3466b0362b0b1593e82d58955cb6017a1ccdd8dd"} Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.706088 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-2-0" podStartSLOduration=2.462848754 podStartE2EDuration="2.706068028s" podCreationTimestamp="2025-11-11 14:25:17 +0000 UTC" firstStartedPulling="2025-11-11 14:25:18.510007958 +0000 UTC m=+3329.170297577" lastFinishedPulling="2025-11-11 14:25:18.753227232 +0000 UTC m=+3329.413516851" observedRunningTime="2025-11-11 14:25:19.697542221 +0000 UTC m=+3330.357831830" watchObservedRunningTime="2025-11-11 14:25:19.706068028 +0000 UTC m=+3330.366357647" Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.736436 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-nfs-0" podStartSLOduration=2.5460908030000002 podStartE2EDuration="2.736418599s" podCreationTimestamp="2025-11-11 14:25:17 +0000 UTC" firstStartedPulling="2025-11-11 14:25:18.56687159 +0000 UTC m=+3329.227161219" lastFinishedPulling="2025-11-11 14:25:18.757199406 +0000 UTC m=+3329.417489015" observedRunningTime="2025-11-11 14:25:19.727999645 +0000 UTC m=+3330.388289275" watchObservedRunningTime="2025-11-11 14:25:19.736418599 +0000 UTC m=+3330.396708218" Nov 11 14:25:19 crc kubenswrapper[4842]: I1111 14:25:19.758505 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.415289915 podStartE2EDuration="2.758482922s" podCreationTimestamp="2025-11-11 14:25:17 +0000 UTC" firstStartedPulling="2025-11-11 14:25:18.346261515 +0000 UTC m=+3329.006551134" lastFinishedPulling="2025-11-11 14:25:18.689454522 +0000 UTC m=+3329.349744141" observedRunningTime="2025-11-11 14:25:19.749603343 +0000 UTC m=+3330.409892962" watchObservedRunningTime="2025-11-11 14:25:19.758482922 +0000 UTC m=+3330.418772541" Nov 11 14:25:21 crc kubenswrapper[4842]: I1111 14:25:21.059508 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 
14:25:21 crc kubenswrapper[4842]: E1111 14:25:21.060579 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:25:22 crc kubenswrapper[4842]: I1111 14:25:22.689763 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 11 14:25:22 crc kubenswrapper[4842]: I1111 14:25:22.807973 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:22 crc kubenswrapper[4842]: I1111 14:25:22.833920 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:27 crc kubenswrapper[4842]: I1111 14:25:27.878804 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 11 14:25:28 crc kubenswrapper[4842]: I1111 14:25:28.026047 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-0" Nov 11 14:25:28 crc kubenswrapper[4842]: I1111 14:25:28.119557 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-nfs-2-0" Nov 11 14:25:35 crc kubenswrapper[4842]: I1111 14:25:35.060041 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:25:35 crc kubenswrapper[4842]: E1111 14:25:35.060975 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:25:46 crc kubenswrapper[4842]: I1111 14:25:46.059966 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:25:46 crc kubenswrapper[4842]: E1111 14:25:46.060919 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:26:01 crc kubenswrapper[4842]: I1111 14:26:01.060172 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:26:01 crc kubenswrapper[4842]: E1111 14:26:01.060977 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 
14:26:12 crc kubenswrapper[4842]: I1111 14:26:12.060252 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:26:12 crc kubenswrapper[4842]: E1111 14:26:12.061195 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:26:20 crc kubenswrapper[4842]: I1111 14:26:20.500083 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 14:26:20 crc kubenswrapper[4842]: I1111 14:26:20.500858 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="prometheus" containerID="cri-o://f6860d6673ae88b1d5b2865d3976e9d52bb6733abf0b594b74401cf811c28168" gracePeriod=600 Nov 11 14:26:20 crc kubenswrapper[4842]: I1111 14:26:20.501277 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="thanos-sidecar" containerID="cri-o://d032234bf282adbe08f9d91d7141b25c1a3b2851b0d5bc13405206a923e99d55" gracePeriod=600 Nov 11 14:26:20 crc kubenswrapper[4842]: I1111 14:26:20.501338 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="config-reloader" containerID="cri-o://3f2ab27f69be57f3b89d410b3aafeb0f9f25b4ddfe1983376d0eb85cbc50270c" gracePeriod=600 Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.284538 4842 generic.go:334] "Generic (PLEG): container finished" podID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerID="d032234bf282adbe08f9d91d7141b25c1a3b2851b0d5bc13405206a923e99d55" exitCode=0 Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.284851 4842 generic.go:334] "Generic (PLEG): container finished" podID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerID="3f2ab27f69be57f3b89d410b3aafeb0f9f25b4ddfe1983376d0eb85cbc50270c" exitCode=0 Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.284862 4842 generic.go:334] "Generic (PLEG): container finished" podID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerID="f6860d6673ae88b1d5b2865d3976e9d52bb6733abf0b594b74401cf811c28168" exitCode=0 Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.284632 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerDied","Data":"d032234bf282adbe08f9d91d7141b25c1a3b2851b0d5bc13405206a923e99d55"} Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.284898 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerDied","Data":"3f2ab27f69be57f3b89d410b3aafeb0f9f25b4ddfe1983376d0eb85cbc50270c"} Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.284914 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerDied","Data":"f6860d6673ae88b1d5b2865d3976e9d52bb6733abf0b594b74401cf811c28168"} Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.615527 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.753756 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-config\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.753818 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-tls-assets\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.753882 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/05ca3203-5029-40ea-a623-f2bf653c0af2-prometheus-metric-storage-rulefiles-0\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.753934 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.753996 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-thanos-prometheus-http-client-file\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.754017 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.754155 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.754180 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pwhl\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-kube-api-access-9pwhl\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.754224 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.754334 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-secret-combined-ca-bundle\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.754360 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/05ca3203-5029-40ea-a623-f2bf653c0af2-config-out\") pod \"05ca3203-5029-40ea-a623-f2bf653c0af2\" (UID: \"05ca3203-5029-40ea-a623-f2bf653c0af2\") " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.754759 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05ca3203-5029-40ea-a623-f2bf653c0af2-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.762306 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-config" (OuterVolumeSpecName: "config") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.762329 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05ca3203-5029-40ea-a623-f2bf653c0af2-config-out" (OuterVolumeSpecName: "config-out") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.762415 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.764423 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "tls-assets". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.777836 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.782410 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.783039 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.790397 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-kube-api-access-9pwhl" (OuterVolumeSpecName: "kube-api-access-9pwhl") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "kube-api-access-9pwhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.803541 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "pvc-41d5652c-383b-4bea-9fe3-3f23d692956d". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.850975 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config" (OuterVolumeSpecName: "web-config") pod "05ca3203-5029-40ea-a623-f2bf653c0af2" (UID: "05ca3203-5029-40ea-a623-f2bf653c0af2"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857702 4842 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-config\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857759 4842 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857774 4842 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/05ca3203-5029-40ea-a623-f2bf653c0af2-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857787 4842 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857800 4842 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857813 4842 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857861 4842 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") on node \"crc\" " Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857876 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pwhl\" (UniqueName: \"kubernetes.io/projected/05ca3203-5029-40ea-a623-f2bf653c0af2-kube-api-access-9pwhl\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857903 4842 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857915 4842 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05ca3203-5029-40ea-a623-f2bf653c0af2-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.857927 4842 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/05ca3203-5029-40ea-a623-f2bf653c0af2-config-out\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.894080 4842 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.894953 4842 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-41d5652c-383b-4bea-9fe3-3f23d692956d" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d") on node "crc" Nov 11 14:26:21 crc kubenswrapper[4842]: I1111 14:26:21.960628 4842 reconciler_common.go:293] "Volume detached for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") on node \"crc\" DevicePath \"\"" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.298266 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"05ca3203-5029-40ea-a623-f2bf653c0af2","Type":"ContainerDied","Data":"76cd89d997b20925b27b9326d48b1f998630fb436dc0e0266fdaeb19814ad7e1"} Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.298790 4842 scope.go:117] "RemoveContainer" containerID="d032234bf282adbe08f9d91d7141b25c1a3b2851b0d5bc13405206a923e99d55" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.298367 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.329456 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.333872 4842 scope.go:117] "RemoveContainer" containerID="3f2ab27f69be57f3b89d410b3aafeb0f9f25b4ddfe1983376d0eb85cbc50270c" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.344824 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.360283 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 14:26:22 crc kubenswrapper[4842]: E1111 14:26:22.361271 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="thanos-sidecar" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.361294 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="thanos-sidecar" Nov 11 14:26:22 crc kubenswrapper[4842]: E1111 14:26:22.361312 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="prometheus" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.361319 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="prometheus" Nov 11 14:26:22 crc kubenswrapper[4842]: E1111 14:26:22.361329 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="init-config-reloader" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.361337 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="init-config-reloader" Nov 11 14:26:22 crc kubenswrapper[4842]: E1111 14:26:22.361363 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="config-reloader" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.361369 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="config-reloader" Nov 11 14:26:22 
crc kubenswrapper[4842]: I1111 14:26:22.361562 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="thanos-sidecar" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.361596 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="prometheus" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.361609 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" containerName="config-reloader" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.364085 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.365339 4842 scope.go:117] "RemoveContainer" containerID="f6860d6673ae88b1d5b2865d3976e9d52bb6733abf0b594b74401cf811c28168" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.371546 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.371723 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.371852 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.372151 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.372327 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-tcncx" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.381506 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.395760 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.421055 4842 scope.go:117] "RemoveContainer" containerID="d2b3e38a69dea58af8fe45e7ba25d0aa771a93d5d3323387829134ce78fc8a85" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471495 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471550 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-config\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471582 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: 
\"kubernetes.io/configmap/70812af6-a8c3-4e0c-93b6-017fd4117173-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471612 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471658 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471684 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glgn7\" (UniqueName: \"kubernetes.io/projected/70812af6-a8c3-4e0c-93b6-017fd4117173-kube-api-access-glgn7\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471702 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471728 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471762 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471783 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/70812af6-a8c3-4e0c-93b6-017fd4117173-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.471804 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/70812af6-a8c3-4e0c-93b6-017fd4117173-tls-assets\") pod 
\"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573306 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573386 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glgn7\" (UniqueName: \"kubernetes.io/projected/70812af6-a8c3-4e0c-93b6-017fd4117173-kube-api-access-glgn7\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573410 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573443 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573492 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573518 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/70812af6-a8c3-4e0c-93b6-017fd4117173-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573548 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/70812af6-a8c3-4e0c-93b6-017fd4117173-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573655 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573686 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-config\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573712 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/70812af6-a8c3-4e0c-93b6-017fd4117173-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.573747 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.575934 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/70812af6-a8c3-4e0c-93b6-017fd4117173-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.590948 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.591357 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/70812af6-a8c3-4e0c-93b6-017fd4117173-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.592751 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.595303 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/70812af6-a8c3-4e0c-93b6-017fd4117173-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.595655 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.603909 4842 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.603913 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.604343 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/70812af6-a8c3-4e0c-93b6-017fd4117173-config\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.608751 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glgn7\" (UniqueName: \"kubernetes.io/projected/70812af6-a8c3-4e0c-93b6-017fd4117173-kube-api-access-glgn7\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.609832 4842 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.609880 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f6d1d4d77a4f1fd1afa0791ead5af16f820dac5d1fa2885ec7edd11054a9ebc3/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.654855 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-41d5652c-383b-4bea-9fe3-3f23d692956d\") pod \"prometheus-metric-storage-0\" (UID: \"70812af6-a8c3-4e0c-93b6-017fd4117173\") " pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:22 crc kubenswrapper[4842]: I1111 14:26:22.699045 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:23 crc kubenswrapper[4842]: I1111 14:26:23.225467 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 11 14:26:23 crc kubenswrapper[4842]: I1111 14:26:23.331175 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"70812af6-a8c3-4e0c-93b6-017fd4117173","Type":"ContainerStarted","Data":"824250f02d5a99e9575f10b0e250817dcdc3e19974bb8238af19d73339608d3a"} Nov 11 14:26:24 crc kubenswrapper[4842]: I1111 14:26:24.059277 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:26:24 crc kubenswrapper[4842]: E1111 14:26:24.061332 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:26:24 crc kubenswrapper[4842]: I1111 14:26:24.073050 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05ca3203-5029-40ea-a623-f2bf653c0af2" path="/var/lib/kubelet/pods/05ca3203-5029-40ea-a623-f2bf653c0af2/volumes" Nov 11 14:26:27 crc kubenswrapper[4842]: I1111 14:26:27.364274 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"70812af6-a8c3-4e0c-93b6-017fd4117173","Type":"ContainerStarted","Data":"96d918dde7d8ede84be122d7985453301d3d03535c7a46f99684167666526e58"} Nov 11 14:26:34 crc kubenswrapper[4842]: I1111 14:26:34.423566 4842 generic.go:334] "Generic (PLEG): container finished" podID="70812af6-a8c3-4e0c-93b6-017fd4117173" containerID="96d918dde7d8ede84be122d7985453301d3d03535c7a46f99684167666526e58" exitCode=0 Nov 11 14:26:34 crc kubenswrapper[4842]: I1111 14:26:34.423663 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"70812af6-a8c3-4e0c-93b6-017fd4117173","Type":"ContainerDied","Data":"96d918dde7d8ede84be122d7985453301d3d03535c7a46f99684167666526e58"} Nov 11 14:26:35 crc kubenswrapper[4842]: I1111 14:26:35.436833 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"70812af6-a8c3-4e0c-93b6-017fd4117173","Type":"ContainerStarted","Data":"142a2d9cab36f3866090162c42578949c41592320e5d22d54fd6b5a4740a7faf"} Nov 11 14:26:37 crc kubenswrapper[4842]: I1111 14:26:37.060022 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:26:37 crc kubenswrapper[4842]: E1111 14:26:37.060598 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:26:38 crc kubenswrapper[4842]: I1111 14:26:38.466762 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"70812af6-a8c3-4e0c-93b6-017fd4117173","Type":"ContainerStarted","Data":"364062c79d305edff173abab1002895a9526e0351199cf2e742c4aef44006b0e"} Nov 11 14:26:38 crc kubenswrapper[4842]: I1111 14:26:38.467294 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"70812af6-a8c3-4e0c-93b6-017fd4117173","Type":"ContainerStarted","Data":"dda03335d4a6f788848d9ced60f2c34dd0612c235a226a9c99b7ee34742fa7e8"} Nov 11 14:26:38 crc kubenswrapper[4842]: I1111 14:26:38.510744 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=16.510713843 podStartE2EDuration="16.510713843s" podCreationTimestamp="2025-11-11 14:26:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:26:38.505049567 +0000 UTC m=+3409.165339206" watchObservedRunningTime="2025-11-11 14:26:38.510713843 +0000 UTC m=+3409.171003472" Nov 11 14:26:42 crc kubenswrapper[4842]: I1111 14:26:42.699838 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:52 crc kubenswrapper[4842]: I1111 14:26:52.059499 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:26:52 crc kubenswrapper[4842]: I1111 14:26:52.583155 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"bd5a989eb137ecc693054d633887574534ee41e9806b3bb5a69ee689d0d7fb2a"} Nov 11 14:26:52 crc kubenswrapper[4842]: I1111 14:26:52.699651 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:52 crc kubenswrapper[4842]: I1111 14:26:52.705367 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 11 14:26:53 crc kubenswrapper[4842]: I1111 14:26:53.598194 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.363090 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.365511 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.365609 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.382501 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.382938 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.383133 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.383935 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tmr8c" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.384737 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-config-data\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.384872 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.384975 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.486861 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.486910 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.487035 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.487118 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc 
kubenswrapper[4842]: I1111 14:27:14.487205 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.487380 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-config-data\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.487586 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bg25\" (UniqueName: \"kubernetes.io/projected/42e4762f-5636-4ea5-914b-142ccc708e6d-kube-api-access-4bg25\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.487682 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.487761 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.488587 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.489436 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-config-data\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.497308 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.589239 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bg25\" (UniqueName: \"kubernetes.io/projected/42e4762f-5636-4ea5-914b-142ccc708e6d-kube-api-access-4bg25\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.589775 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.589889 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.589953 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.590074 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.590176 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.590245 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.590579 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.591139 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.596412 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.597732 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: 
\"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.611994 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bg25\" (UniqueName: \"kubernetes.io/projected/42e4762f-5636-4ea5-914b-142ccc708e6d-kube-api-access-4bg25\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.621092 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"tempest-tests-tempest\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " pod="openstack/tempest-tests-tempest" Nov 11 14:27:14 crc kubenswrapper[4842]: I1111 14:27:14.713631 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 11 14:27:15 crc kubenswrapper[4842]: I1111 14:27:15.186745 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 11 14:27:15 crc kubenswrapper[4842]: I1111 14:27:15.839494 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"42e4762f-5636-4ea5-914b-142ccc708e6d","Type":"ContainerStarted","Data":"1c5ceae2b0a02da90d37137501a505192a38dd3e16745cd8f93da2581da2bc59"} Nov 11 14:27:25 crc kubenswrapper[4842]: I1111 14:27:25.957813 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"42e4762f-5636-4ea5-914b-142ccc708e6d","Type":"ContainerStarted","Data":"1b49f23b7a1bdbf364349be3fcb6f14dd7429b6aabfab807669c9b5edb1a5f84"} Nov 11 14:27:25 crc kubenswrapper[4842]: I1111 14:27:25.994921 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.569362436 podStartE2EDuration="12.994895534s" podCreationTimestamp="2025-11-11 14:27:13 +0000 UTC" firstStartedPulling="2025-11-11 14:27:15.193619021 +0000 UTC m=+3445.853908650" lastFinishedPulling="2025-11-11 14:27:24.619152129 +0000 UTC m=+3455.279441748" observedRunningTime="2025-11-11 14:27:25.973672902 +0000 UTC m=+3456.633962521" watchObservedRunningTime="2025-11-11 14:27:25.994895534 +0000 UTC m=+3456.655185163" Nov 11 14:29:14 crc kubenswrapper[4842]: I1111 14:29:14.961211 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:29:14 crc kubenswrapper[4842]: I1111 14:29:14.962037 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:29:44 crc kubenswrapper[4842]: I1111 14:29:44.961406 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 11 14:29:44 crc kubenswrapper[4842]: I1111 14:29:44.962182 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.165382 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw"] Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.167396 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.174542 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.177800 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95zm4\" (UniqueName: \"kubernetes.io/projected/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-kube-api-access-95zm4\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.177940 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-config-volume\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.177971 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-secret-volume\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.178250 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.190029 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw"] Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.280179 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95zm4\" (UniqueName: \"kubernetes.io/projected/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-kube-api-access-95zm4\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.280451 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-config-volume\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.280501 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-secret-volume\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.281484 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-config-volume\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.299525 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-secret-volume\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.301950 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95zm4\" (UniqueName: \"kubernetes.io/projected/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-kube-api-access-95zm4\") pod \"collect-profiles-29381190-8nplw\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:00 crc kubenswrapper[4842]: I1111 14:30:00.487290 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:01 crc kubenswrapper[4842]: I1111 14:30:01.057234 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw"] Nov 11 14:30:01 crc kubenswrapper[4842]: I1111 14:30:01.473023 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" event={"ID":"99401aeb-e1d9-49b0-a8c9-f00396fe6b45","Type":"ContainerStarted","Data":"105e727b8bae5c32aabfc19d54377049f10ae20cc330e4998c28480ad96eb4f9"} Nov 11 14:30:01 crc kubenswrapper[4842]: I1111 14:30:01.473420 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" event={"ID":"99401aeb-e1d9-49b0-a8c9-f00396fe6b45","Type":"ContainerStarted","Data":"4231217abeceb53d14500f1dd3091e190a3d0fc5460c0926432643ed381f62e2"} Nov 11 14:30:01 crc kubenswrapper[4842]: I1111 14:30:01.491373 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" podStartSLOduration=1.491356497 podStartE2EDuration="1.491356497s" podCreationTimestamp="2025-11-11 14:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:30:01.486375842 +0000 UTC m=+3612.146665461" watchObservedRunningTime="2025-11-11 14:30:01.491356497 +0000 UTC m=+3612.151646116" Nov 11 14:30:02 crc kubenswrapper[4842]: I1111 14:30:02.486032 4842 generic.go:334] "Generic (PLEG): container finished" podID="99401aeb-e1d9-49b0-a8c9-f00396fe6b45" containerID="105e727b8bae5c32aabfc19d54377049f10ae20cc330e4998c28480ad96eb4f9" exitCode=0 Nov 11 14:30:02 crc kubenswrapper[4842]: I1111 14:30:02.486141 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" event={"ID":"99401aeb-e1d9-49b0-a8c9-f00396fe6b45","Type":"ContainerDied","Data":"105e727b8bae5c32aabfc19d54377049f10ae20cc330e4998c28480ad96eb4f9"} Nov 11 14:30:03 crc kubenswrapper[4842]: I1111 14:30:03.928930 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.056402 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-secret-volume\") pod \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.056460 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-config-volume\") pod \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.056542 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95zm4\" (UniqueName: \"kubernetes.io/projected/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-kube-api-access-95zm4\") pod \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\" (UID: \"99401aeb-e1d9-49b0-a8c9-f00396fe6b45\") " Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.057316 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-config-volume" (OuterVolumeSpecName: "config-volume") pod "99401aeb-e1d9-49b0-a8c9-f00396fe6b45" (UID: "99401aeb-e1d9-49b0-a8c9-f00396fe6b45"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.064314 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "99401aeb-e1d9-49b0-a8c9-f00396fe6b45" (UID: "99401aeb-e1d9-49b0-a8c9-f00396fe6b45"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.065322 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-kube-api-access-95zm4" (OuterVolumeSpecName: "kube-api-access-95zm4") pod "99401aeb-e1d9-49b0-a8c9-f00396fe6b45" (UID: "99401aeb-e1d9-49b0-a8c9-f00396fe6b45"). InnerVolumeSpecName "kube-api-access-95zm4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.159001 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95zm4\" (UniqueName: \"kubernetes.io/projected/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-kube-api-access-95zm4\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.159394 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.159411 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/99401aeb-e1d9-49b0-a8c9-f00396fe6b45-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.508305 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" event={"ID":"99401aeb-e1d9-49b0-a8c9-f00396fe6b45","Type":"ContainerDied","Data":"4231217abeceb53d14500f1dd3091e190a3d0fc5460c0926432643ed381f62e2"} Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.508341 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4231217abeceb53d14500f1dd3091e190a3d0fc5460c0926432643ed381f62e2" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.508395 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw" Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.558607 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m"] Nov 11 14:30:04 crc kubenswrapper[4842]: I1111 14:30:04.570471 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381145-4hq4m"] Nov 11 14:30:06 crc kubenswrapper[4842]: I1111 14:30:06.075332 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20224b1f-f299-41c4-93b0-bea090f9c3cb" path="/var/lib/kubelet/pods/20224b1f-f299-41c4-93b0-bea090f9c3cb/volumes" Nov 11 14:30:14 crc kubenswrapper[4842]: I1111 14:30:14.961119 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:30:14 crc kubenswrapper[4842]: I1111 14:30:14.961855 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:30:14 crc kubenswrapper[4842]: I1111 14:30:14.961902 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:30:14 crc kubenswrapper[4842]: I1111 14:30:14.962714 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bd5a989eb137ecc693054d633887574534ee41e9806b3bb5a69ee689d0d7fb2a"} 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:30:14 crc kubenswrapper[4842]: I1111 14:30:14.962819 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://bd5a989eb137ecc693054d633887574534ee41e9806b3bb5a69ee689d0d7fb2a" gracePeriod=600 Nov 11 14:30:15 crc kubenswrapper[4842]: I1111 14:30:15.613138 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="bd5a989eb137ecc693054d633887574534ee41e9806b3bb5a69ee689d0d7fb2a" exitCode=0 Nov 11 14:30:15 crc kubenswrapper[4842]: I1111 14:30:15.613359 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"bd5a989eb137ecc693054d633887574534ee41e9806b3bb5a69ee689d0d7fb2a"} Nov 11 14:30:15 crc kubenswrapper[4842]: I1111 14:30:15.613929 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088"} Nov 11 14:30:15 crc kubenswrapper[4842]: I1111 14:30:15.614157 4842 scope.go:117] "RemoveContainer" containerID="39c8e7c8450ced3811c1ffd6e169a0bc0fa4c24d26c0783527ca13bdf308b649" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.580073 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tjt6f"] Nov 11 14:30:32 crc kubenswrapper[4842]: E1111 14:30:32.581035 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99401aeb-e1d9-49b0-a8c9-f00396fe6b45" containerName="collect-profiles" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.581048 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="99401aeb-e1d9-49b0-a8c9-f00396fe6b45" containerName="collect-profiles" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.581302 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="99401aeb-e1d9-49b0-a8c9-f00396fe6b45" containerName="collect-profiles" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.582836 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.604172 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjt6f"] Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.735223 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-catalog-content\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.735354 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-utilities\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.735462 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wt2b\" (UniqueName: \"kubernetes.io/projected/12b43678-6aa0-427a-82d5-2ec117deae66-kube-api-access-4wt2b\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.837448 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-utilities\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.837572 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wt2b\" (UniqueName: \"kubernetes.io/projected/12b43678-6aa0-427a-82d5-2ec117deae66-kube-api-access-4wt2b\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.837663 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-catalog-content\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.838059 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-utilities\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.838084 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-catalog-content\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.862937 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4wt2b\" (UniqueName: \"kubernetes.io/projected/12b43678-6aa0-427a-82d5-2ec117deae66-kube-api-access-4wt2b\") pod \"certified-operators-tjt6f\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:32 crc kubenswrapper[4842]: I1111 14:30:32.913854 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:33 crc kubenswrapper[4842]: W1111 14:30:33.510172 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12b43678_6aa0_427a_82d5_2ec117deae66.slice/crio-8ea416baa9783ba8661d247b391aee4cfcb63a4ff1f62c07dcb4686f26094d52 WatchSource:0}: Error finding container 8ea416baa9783ba8661d247b391aee4cfcb63a4ff1f62c07dcb4686f26094d52: Status 404 returned error can't find the container with id 8ea416baa9783ba8661d247b391aee4cfcb63a4ff1f62c07dcb4686f26094d52 Nov 11 14:30:33 crc kubenswrapper[4842]: I1111 14:30:33.519759 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjt6f"] Nov 11 14:30:33 crc kubenswrapper[4842]: I1111 14:30:33.776204 4842 generic.go:334] "Generic (PLEG): container finished" podID="12b43678-6aa0-427a-82d5-2ec117deae66" containerID="f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f" exitCode=0 Nov 11 14:30:33 crc kubenswrapper[4842]: I1111 14:30:33.776274 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjt6f" event={"ID":"12b43678-6aa0-427a-82d5-2ec117deae66","Type":"ContainerDied","Data":"f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f"} Nov 11 14:30:33 crc kubenswrapper[4842]: I1111 14:30:33.776583 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjt6f" event={"ID":"12b43678-6aa0-427a-82d5-2ec117deae66","Type":"ContainerStarted","Data":"8ea416baa9783ba8661d247b391aee4cfcb63a4ff1f62c07dcb4686f26094d52"} Nov 11 14:30:33 crc kubenswrapper[4842]: I1111 14:30:33.778190 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:30:35 crc kubenswrapper[4842]: I1111 14:30:35.796147 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjt6f" event={"ID":"12b43678-6aa0-427a-82d5-2ec117deae66","Type":"ContainerStarted","Data":"549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd"} Nov 11 14:30:36 crc kubenswrapper[4842]: I1111 14:30:36.808368 4842 generic.go:334] "Generic (PLEG): container finished" podID="12b43678-6aa0-427a-82d5-2ec117deae66" containerID="549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd" exitCode=0 Nov 11 14:30:36 crc kubenswrapper[4842]: I1111 14:30:36.808419 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjt6f" event={"ID":"12b43678-6aa0-427a-82d5-2ec117deae66","Type":"ContainerDied","Data":"549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd"} Nov 11 14:30:37 crc kubenswrapper[4842]: I1111 14:30:37.822161 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjt6f" event={"ID":"12b43678-6aa0-427a-82d5-2ec117deae66","Type":"ContainerStarted","Data":"385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb"} Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 
14:30:42.690461 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tjt6f" podStartSLOduration=7.147746862 podStartE2EDuration="10.690442547s" podCreationTimestamp="2025-11-11 14:30:32 +0000 UTC" firstStartedPulling="2025-11-11 14:30:33.777898341 +0000 UTC m=+3644.438187960" lastFinishedPulling="2025-11-11 14:30:37.320594026 +0000 UTC m=+3647.980883645" observedRunningTime="2025-11-11 14:30:37.844562959 +0000 UTC m=+3648.504852578" watchObservedRunningTime="2025-11-11 14:30:42.690442547 +0000 UTC m=+3653.350732156" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.697469 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6qc2p"] Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.703345 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.710016 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qc2p"] Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.747227 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-utilities\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.747313 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-226kz\" (UniqueName: \"kubernetes.io/projected/a57e7e91-680b-47cc-8b0e-92ab15263ec5-kube-api-access-226kz\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.747363 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-catalog-content\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.849162 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-utilities\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.849226 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-226kz\" (UniqueName: \"kubernetes.io/projected/a57e7e91-680b-47cc-8b0e-92ab15263ec5-kube-api-access-226kz\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.849254 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-catalog-content\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 
11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.849750 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-catalog-content\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.850002 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-utilities\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.870903 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-226kz\" (UniqueName: \"kubernetes.io/projected/a57e7e91-680b-47cc-8b0e-92ab15263ec5-kube-api-access-226kz\") pod \"redhat-marketplace-6qc2p\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.914426 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.914878 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:42 crc kubenswrapper[4842]: I1111 14:30:42.971358 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:43 crc kubenswrapper[4842]: I1111 14:30:43.035043 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:43 crc kubenswrapper[4842]: I1111 14:30:43.549405 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qc2p"] Nov 11 14:30:43 crc kubenswrapper[4842]: I1111 14:30:43.883539 4842 generic.go:334] "Generic (PLEG): container finished" podID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerID="df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e" exitCode=0 Nov 11 14:30:43 crc kubenswrapper[4842]: I1111 14:30:43.883590 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qc2p" event={"ID":"a57e7e91-680b-47cc-8b0e-92ab15263ec5","Type":"ContainerDied","Data":"df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e"} Nov 11 14:30:43 crc kubenswrapper[4842]: I1111 14:30:43.883643 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qc2p" event={"ID":"a57e7e91-680b-47cc-8b0e-92ab15263ec5","Type":"ContainerStarted","Data":"f21be469391386d772214acc2634f64308000be68414788984e9ced0fe9558c8"} Nov 11 14:30:43 crc kubenswrapper[4842]: I1111 14:30:43.944431 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:44 crc kubenswrapper[4842]: I1111 14:30:44.895519 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qc2p" event={"ID":"a57e7e91-680b-47cc-8b0e-92ab15263ec5","Type":"ContainerStarted","Data":"e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e"} Nov 11 14:30:45 crc kubenswrapper[4842]: I1111 14:30:45.275444 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjt6f"] Nov 11 14:30:45 crc kubenswrapper[4842]: I1111 14:30:45.906414 4842 generic.go:334] "Generic (PLEG): container finished" podID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerID="e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e" exitCode=0 Nov 11 14:30:45 crc kubenswrapper[4842]: I1111 14:30:45.906659 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tjt6f" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="registry-server" containerID="cri-o://385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb" gracePeriod=2 Nov 11 14:30:45 crc kubenswrapper[4842]: I1111 14:30:45.907913 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qc2p" event={"ID":"a57e7e91-680b-47cc-8b0e-92ab15263ec5","Type":"ContainerDied","Data":"e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e"} Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.431759 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.533714 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-utilities\") pod \"12b43678-6aa0-427a-82d5-2ec117deae66\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.533762 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-catalog-content\") pod \"12b43678-6aa0-427a-82d5-2ec117deae66\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.533981 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wt2b\" (UniqueName: \"kubernetes.io/projected/12b43678-6aa0-427a-82d5-2ec117deae66-kube-api-access-4wt2b\") pod \"12b43678-6aa0-427a-82d5-2ec117deae66\" (UID: \"12b43678-6aa0-427a-82d5-2ec117deae66\") " Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.534935 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-utilities" (OuterVolumeSpecName: "utilities") pod "12b43678-6aa0-427a-82d5-2ec117deae66" (UID: "12b43678-6aa0-427a-82d5-2ec117deae66"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.539972 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12b43678-6aa0-427a-82d5-2ec117deae66-kube-api-access-4wt2b" (OuterVolumeSpecName: "kube-api-access-4wt2b") pod "12b43678-6aa0-427a-82d5-2ec117deae66" (UID: "12b43678-6aa0-427a-82d5-2ec117deae66"). InnerVolumeSpecName "kube-api-access-4wt2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.581518 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12b43678-6aa0-427a-82d5-2ec117deae66" (UID: "12b43678-6aa0-427a-82d5-2ec117deae66"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.637075 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wt2b\" (UniqueName: \"kubernetes.io/projected/12b43678-6aa0-427a-82d5-2ec117deae66-kube-api-access-4wt2b\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.637187 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.637212 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12b43678-6aa0-427a-82d5-2ec117deae66-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.915247 4842 generic.go:334] "Generic (PLEG): container finished" podID="12b43678-6aa0-427a-82d5-2ec117deae66" containerID="385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb" exitCode=0 Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.915621 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjt6f" event={"ID":"12b43678-6aa0-427a-82d5-2ec117deae66","Type":"ContainerDied","Data":"385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb"} Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.915647 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjt6f" event={"ID":"12b43678-6aa0-427a-82d5-2ec117deae66","Type":"ContainerDied","Data":"8ea416baa9783ba8661d247b391aee4cfcb63a4ff1f62c07dcb4686f26094d52"} Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.915663 4842 scope.go:117] "RemoveContainer" containerID="385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.915793 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tjt6f" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.923837 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qc2p" event={"ID":"a57e7e91-680b-47cc-8b0e-92ab15263ec5","Type":"ContainerStarted","Data":"192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114"} Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.943902 4842 scope.go:117] "RemoveContainer" containerID="549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.952035 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6qc2p" podStartSLOduration=2.232100041 podStartE2EDuration="4.952015229s" podCreationTimestamp="2025-11-11 14:30:42 +0000 UTC" firstStartedPulling="2025-11-11 14:30:43.885784743 +0000 UTC m=+3654.546074362" lastFinishedPulling="2025-11-11 14:30:46.605699931 +0000 UTC m=+3657.265989550" observedRunningTime="2025-11-11 14:30:46.940691165 +0000 UTC m=+3657.600980784" watchObservedRunningTime="2025-11-11 14:30:46.952015229 +0000 UTC m=+3657.612304848" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.962454 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjt6f"] Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.970741 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tjt6f"] Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.974264 4842 scope.go:117] "RemoveContainer" containerID="f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.995068 4842 scope.go:117] "RemoveContainer" containerID="385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb" Nov 11 14:30:46 crc kubenswrapper[4842]: E1111 14:30:46.995615 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb\": container with ID starting with 385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb not found: ID does not exist" containerID="385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.995661 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb"} err="failed to get container status \"385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb\": rpc error: code = NotFound desc = could not find container \"385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb\": container with ID starting with 385ebd4e2ef227378091e943a487f7dc0ec8334be8e8bf136676b641f1f385bb not found: ID does not exist" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.995682 4842 scope.go:117] "RemoveContainer" containerID="549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd" Nov 11 14:30:46 crc kubenswrapper[4842]: E1111 14:30:46.996179 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd\": container with ID starting with 549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd not found: ID does not exist" 
containerID="549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.996201 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd"} err="failed to get container status \"549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd\": rpc error: code = NotFound desc = could not find container \"549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd\": container with ID starting with 549c3c365764b69cf922bafd549a62472698a94d57db552801dfc3eb576b64cd not found: ID does not exist" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.996238 4842 scope.go:117] "RemoveContainer" containerID="f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f" Nov 11 14:30:46 crc kubenswrapper[4842]: E1111 14:30:46.996554 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f\": container with ID starting with f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f not found: ID does not exist" containerID="f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f" Nov 11 14:30:46 crc kubenswrapper[4842]: I1111 14:30:46.996580 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f"} err="failed to get container status \"f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f\": rpc error: code = NotFound desc = could not find container \"f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f\": container with ID starting with f7c25d73ad516e3ea8e2ea027a5e3523d5be336a70869a18f7b17e4c80c3e82f not found: ID does not exist" Nov 11 14:30:48 crc kubenswrapper[4842]: I1111 14:30:48.071552 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" path="/var/lib/kubelet/pods/12b43678-6aa0-427a-82d5-2ec117deae66/volumes" Nov 11 14:30:53 crc kubenswrapper[4842]: I1111 14:30:53.035244 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:53 crc kubenswrapper[4842]: I1111 14:30:53.035964 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:53 crc kubenswrapper[4842]: I1111 14:30:53.087191 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:54 crc kubenswrapper[4842]: I1111 14:30:54.036392 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:54 crc kubenswrapper[4842]: I1111 14:30:54.096383 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qc2p"] Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.005888 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6qc2p" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="registry-server" containerID="cri-o://192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114" gracePeriod=2 Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.498800 4842 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.655659 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-226kz\" (UniqueName: \"kubernetes.io/projected/a57e7e91-680b-47cc-8b0e-92ab15263ec5-kube-api-access-226kz\") pod \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.655773 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-utilities\") pod \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.655831 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-catalog-content\") pod \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\" (UID: \"a57e7e91-680b-47cc-8b0e-92ab15263ec5\") " Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.656648 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-utilities" (OuterVolumeSpecName: "utilities") pod "a57e7e91-680b-47cc-8b0e-92ab15263ec5" (UID: "a57e7e91-680b-47cc-8b0e-92ab15263ec5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.666852 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a57e7e91-680b-47cc-8b0e-92ab15263ec5-kube-api-access-226kz" (OuterVolumeSpecName: "kube-api-access-226kz") pod "a57e7e91-680b-47cc-8b0e-92ab15263ec5" (UID: "a57e7e91-680b-47cc-8b0e-92ab15263ec5"). InnerVolumeSpecName "kube-api-access-226kz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.684332 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a57e7e91-680b-47cc-8b0e-92ab15263ec5" (UID: "a57e7e91-680b-47cc-8b0e-92ab15263ec5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.759197 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.759240 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-226kz\" (UniqueName: \"kubernetes.io/projected/a57e7e91-680b-47cc-8b0e-92ab15263ec5-kube-api-access-226kz\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:56 crc kubenswrapper[4842]: I1111 14:30:56.759255 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a57e7e91-680b-47cc-8b0e-92ab15263ec5-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.017507 4842 generic.go:334] "Generic (PLEG): container finished" podID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerID="192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114" exitCode=0 Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.017570 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qc2p" event={"ID":"a57e7e91-680b-47cc-8b0e-92ab15263ec5","Type":"ContainerDied","Data":"192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114"} Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.017582 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qc2p" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.017599 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qc2p" event={"ID":"a57e7e91-680b-47cc-8b0e-92ab15263ec5","Type":"ContainerDied","Data":"f21be469391386d772214acc2634f64308000be68414788984e9ced0fe9558c8"} Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.017635 4842 scope.go:117] "RemoveContainer" containerID="192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.040772 4842 scope.go:117] "RemoveContainer" containerID="e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.058611 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qc2p"] Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.066471 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qc2p"] Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.084384 4842 scope.go:117] "RemoveContainer" containerID="df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.122470 4842 scope.go:117] "RemoveContainer" containerID="192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114" Nov 11 14:30:57 crc kubenswrapper[4842]: E1111 14:30:57.123331 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114\": container with ID starting with 192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114 not found: ID does not exist" containerID="192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.123515 4842 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114"} err="failed to get container status \"192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114\": rpc error: code = NotFound desc = could not find container \"192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114\": container with ID starting with 192b7c62489331422311665933f3ebfdeb63b660133ac11457313dde9d9b8114 not found: ID does not exist" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.123572 4842 scope.go:117] "RemoveContainer" containerID="e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e" Nov 11 14:30:57 crc kubenswrapper[4842]: E1111 14:30:57.124108 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e\": container with ID starting with e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e not found: ID does not exist" containerID="e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.124141 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e"} err="failed to get container status \"e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e\": rpc error: code = NotFound desc = could not find container \"e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e\": container with ID starting with e2e782468e6b4804236fd4726a556846936f348f020eb8b24f50784b11d1645e not found: ID does not exist" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.124166 4842 scope.go:117] "RemoveContainer" containerID="df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e" Nov 11 14:30:57 crc kubenswrapper[4842]: E1111 14:30:57.125036 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e\": container with ID starting with df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e not found: ID does not exist" containerID="df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e" Nov 11 14:30:57 crc kubenswrapper[4842]: I1111 14:30:57.125074 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e"} err="failed to get container status \"df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e\": rpc error: code = NotFound desc = could not find container \"df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e\": container with ID starting with df2a05e2a1c6393e25caab746a5e5f10f525cdba7f799cb8040ee27c1dd03c0e not found: ID does not exist" Nov 11 14:30:58 crc kubenswrapper[4842]: I1111 14:30:58.087758 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" path="/var/lib/kubelet/pods/a57e7e91-680b-47cc-8b0e-92ab15263ec5/volumes" Nov 11 14:31:01 crc kubenswrapper[4842]: I1111 14:31:01.387486 4842 scope.go:117] "RemoveContainer" containerID="60ee5d6aecbd428352a58f85820416e81f538a71580b78115e53e6aa4bb7d844" Nov 11 14:32:44 crc kubenswrapper[4842]: I1111 14:32:44.961401 4842 patch_prober.go:28] interesting 
pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:32:44 crc kubenswrapper[4842]: I1111 14:32:44.961888 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:33:14 crc kubenswrapper[4842]: I1111 14:33:14.961733 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:33:14 crc kubenswrapper[4842]: I1111 14:33:14.962509 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:33:44 crc kubenswrapper[4842]: I1111 14:33:44.961197 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:33:44 crc kubenswrapper[4842]: I1111 14:33:44.961920 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:33:44 crc kubenswrapper[4842]: I1111 14:33:44.961984 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:33:44 crc kubenswrapper[4842]: I1111 14:33:44.962935 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:33:44 crc kubenswrapper[4842]: I1111 14:33:44.963016 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" gracePeriod=600 Nov 11 14:33:45 crc kubenswrapper[4842]: E1111 14:33:45.102434 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:33:45 crc kubenswrapper[4842]: I1111 14:33:45.630147 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" exitCode=0 Nov 11 14:33:45 crc kubenswrapper[4842]: I1111 14:33:45.630274 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088"} Nov 11 14:33:45 crc kubenswrapper[4842]: I1111 14:33:45.630668 4842 scope.go:117] "RemoveContainer" containerID="bd5a989eb137ecc693054d633887574534ee41e9806b3bb5a69ee689d0d7fb2a" Nov 11 14:33:45 crc kubenswrapper[4842]: I1111 14:33:45.631526 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:33:45 crc kubenswrapper[4842]: E1111 14:33:45.631861 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:33:57 crc kubenswrapper[4842]: I1111 14:33:57.059823 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:33:57 crc kubenswrapper[4842]: E1111 14:33:57.060601 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:34:12 crc kubenswrapper[4842]: I1111 14:34:12.059082 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:34:12 crc kubenswrapper[4842]: E1111 14:34:12.060249 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:34:23 crc kubenswrapper[4842]: I1111 14:34:23.059328 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:34:23 crc kubenswrapper[4842]: E1111 14:34:23.060213 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" 
podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:34:35 crc kubenswrapper[4842]: I1111 14:34:35.074273 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:34:35 crc kubenswrapper[4842]: E1111 14:34:35.075559 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:34:49 crc kubenswrapper[4842]: I1111 14:34:49.059346 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:34:49 crc kubenswrapper[4842]: E1111 14:34:49.060041 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:35:00 crc kubenswrapper[4842]: I1111 14:35:00.066879 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:35:00 crc kubenswrapper[4842]: E1111 14:35:00.067716 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.379440 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4g47v"] Nov 11 14:35:04 crc kubenswrapper[4842]: E1111 14:35:04.381335 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="extract-utilities" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.381429 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="extract-utilities" Nov 11 14:35:04 crc kubenswrapper[4842]: E1111 14:35:04.381504 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="registry-server" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.381563 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="registry-server" Nov 11 14:35:04 crc kubenswrapper[4842]: E1111 14:35:04.381634 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="extract-utilities" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.381695 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="extract-utilities" Nov 11 14:35:04 crc kubenswrapper[4842]: E1111 14:35:04.381762 4842 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="extract-content" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.381819 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="extract-content" Nov 11 14:35:04 crc kubenswrapper[4842]: E1111 14:35:04.381877 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="extract-content" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.381941 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="extract-content" Nov 11 14:35:04 crc kubenswrapper[4842]: E1111 14:35:04.382022 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="registry-server" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.382112 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="registry-server" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.382359 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="12b43678-6aa0-427a-82d5-2ec117deae66" containerName="registry-server" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.382449 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="a57e7e91-680b-47cc-8b0e-92ab15263ec5" containerName="registry-server" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.386257 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.409605 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4g47v"] Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.532824 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-catalog-content\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.533072 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skdzk\" (UniqueName: \"kubernetes.io/projected/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-kube-api-access-skdzk\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.533174 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-utilities\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.635186 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-catalog-content\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.635373 
4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skdzk\" (UniqueName: \"kubernetes.io/projected/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-kube-api-access-skdzk\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.635415 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-utilities\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.635831 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-catalog-content\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.635912 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-utilities\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.672223 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skdzk\" (UniqueName: \"kubernetes.io/projected/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-kube-api-access-skdzk\") pod \"community-operators-4g47v\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:04 crc kubenswrapper[4842]: I1111 14:35:04.739789 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:05 crc kubenswrapper[4842]: I1111 14:35:05.315408 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4g47v"] Nov 11 14:35:05 crc kubenswrapper[4842]: I1111 14:35:05.421910 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4g47v" event={"ID":"fa7267a2-7fb5-4d6b-8ac0-a70164f10500","Type":"ContainerStarted","Data":"7158eaf46e1b50e52542d919f4ec3cca37eb58d3f4681a8c99843c8cc631e992"} Nov 11 14:35:06 crc kubenswrapper[4842]: I1111 14:35:06.430535 4842 generic.go:334] "Generic (PLEG): container finished" podID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerID="c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d" exitCode=0 Nov 11 14:35:06 crc kubenswrapper[4842]: I1111 14:35:06.430579 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4g47v" event={"ID":"fa7267a2-7fb5-4d6b-8ac0-a70164f10500","Type":"ContainerDied","Data":"c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d"} Nov 11 14:35:07 crc kubenswrapper[4842]: I1111 14:35:07.443653 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4g47v" event={"ID":"fa7267a2-7fb5-4d6b-8ac0-a70164f10500","Type":"ContainerStarted","Data":"a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e"} Nov 11 14:35:09 crc kubenswrapper[4842]: I1111 14:35:09.462710 4842 generic.go:334] "Generic (PLEG): container finished" podID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerID="a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e" exitCode=0 Nov 11 14:35:09 crc kubenswrapper[4842]: I1111 14:35:09.462751 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4g47v" event={"ID":"fa7267a2-7fb5-4d6b-8ac0-a70164f10500","Type":"ContainerDied","Data":"a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e"} Nov 11 14:35:10 crc kubenswrapper[4842]: I1111 14:35:10.473441 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4g47v" event={"ID":"fa7267a2-7fb5-4d6b-8ac0-a70164f10500","Type":"ContainerStarted","Data":"3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8"} Nov 11 14:35:14 crc kubenswrapper[4842]: I1111 14:35:14.059928 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:35:14 crc kubenswrapper[4842]: E1111 14:35:14.060672 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:35:14 crc kubenswrapper[4842]: I1111 14:35:14.740195 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:14 crc kubenswrapper[4842]: I1111 14:35:14.740454 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:14 crc kubenswrapper[4842]: I1111 14:35:14.788884 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:14 crc kubenswrapper[4842]: I1111 14:35:14.812343 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4g47v" podStartSLOduration=7.34608616 podStartE2EDuration="10.81232193s" podCreationTimestamp="2025-11-11 14:35:04 +0000 UTC" firstStartedPulling="2025-11-11 14:35:06.433214613 +0000 UTC m=+3917.093504232" lastFinishedPulling="2025-11-11 14:35:09.899450373 +0000 UTC m=+3920.559740002" observedRunningTime="2025-11-11 14:35:10.49117732 +0000 UTC m=+3921.151466959" watchObservedRunningTime="2025-11-11 14:35:14.81232193 +0000 UTC m=+3925.472611549" Nov 11 14:35:15 crc kubenswrapper[4842]: I1111 14:35:15.570480 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:15 crc kubenswrapper[4842]: I1111 14:35:15.636487 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4g47v"] Nov 11 14:35:17 crc kubenswrapper[4842]: I1111 14:35:17.541225 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4g47v" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="registry-server" containerID="cri-o://3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8" gracePeriod=2 Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.015064 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.117988 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skdzk\" (UniqueName: \"kubernetes.io/projected/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-kube-api-access-skdzk\") pod \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.118134 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-utilities\") pod \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.118211 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-catalog-content\") pod \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\" (UID: \"fa7267a2-7fb5-4d6b-8ac0-a70164f10500\") " Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.119196 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-utilities" (OuterVolumeSpecName: "utilities") pod "fa7267a2-7fb5-4d6b-8ac0-a70164f10500" (UID: "fa7267a2-7fb5-4d6b-8ac0-a70164f10500"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.125894 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-kube-api-access-skdzk" (OuterVolumeSpecName: "kube-api-access-skdzk") pod "fa7267a2-7fb5-4d6b-8ac0-a70164f10500" (UID: "fa7267a2-7fb5-4d6b-8ac0-a70164f10500"). InnerVolumeSpecName "kube-api-access-skdzk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.182752 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa7267a2-7fb5-4d6b-8ac0-a70164f10500" (UID: "fa7267a2-7fb5-4d6b-8ac0-a70164f10500"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.221400 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.221441 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skdzk\" (UniqueName: \"kubernetes.io/projected/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-kube-api-access-skdzk\") on node \"crc\" DevicePath \"\"" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.221454 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa7267a2-7fb5-4d6b-8ac0-a70164f10500-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.550899 4842 generic.go:334] "Generic (PLEG): container finished" podID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerID="3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8" exitCode=0 Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.550945 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4g47v" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.550968 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4g47v" event={"ID":"fa7267a2-7fb5-4d6b-8ac0-a70164f10500","Type":"ContainerDied","Data":"3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8"} Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.551911 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4g47v" event={"ID":"fa7267a2-7fb5-4d6b-8ac0-a70164f10500","Type":"ContainerDied","Data":"7158eaf46e1b50e52542d919f4ec3cca37eb58d3f4681a8c99843c8cc631e992"} Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.551932 4842 scope.go:117] "RemoveContainer" containerID="3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.595397 4842 scope.go:117] "RemoveContainer" containerID="a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.610462 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4g47v"] Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.624774 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4g47v"] Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.636881 4842 scope.go:117] "RemoveContainer" containerID="c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.675872 4842 scope.go:117] "RemoveContainer" containerID="3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8" Nov 11 14:35:18 crc kubenswrapper[4842]: E1111 14:35:18.676298 4842 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8\": container with ID starting with 3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8 not found: ID does not exist" containerID="3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.676348 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8"} err="failed to get container status \"3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8\": rpc error: code = NotFound desc = could not find container \"3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8\": container with ID starting with 3fcdfc2a0852b5fc7605f51e2ac0693fb09cd1bd34080ed17cd798218bdef9b8 not found: ID does not exist" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.676380 4842 scope.go:117] "RemoveContainer" containerID="a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e" Nov 11 14:35:18 crc kubenswrapper[4842]: E1111 14:35:18.677018 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e\": container with ID starting with a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e not found: ID does not exist" containerID="a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.677044 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e"} err="failed to get container status \"a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e\": rpc error: code = NotFound desc = could not find container \"a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e\": container with ID starting with a8ca0dbc3bed0f05fbde5196bb35cd2c47d4881ec4bcd22d39d4d9395c20dd5e not found: ID does not exist" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.677066 4842 scope.go:117] "RemoveContainer" containerID="c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d" Nov 11 14:35:18 crc kubenswrapper[4842]: E1111 14:35:18.677558 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d\": container with ID starting with c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d not found: ID does not exist" containerID="c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d" Nov 11 14:35:18 crc kubenswrapper[4842]: I1111 14:35:18.677600 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d"} err="failed to get container status \"c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d\": rpc error: code = NotFound desc = could not find container \"c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d\": container with ID starting with c88ff607b68f50f66912c25b2ec2924fb738daa0068a6c2e58a544cf6325a43d not found: ID does not exist" Nov 11 14:35:20 crc kubenswrapper[4842]: I1111 14:35:20.072511 4842 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" path="/var/lib/kubelet/pods/fa7267a2-7fb5-4d6b-8ac0-a70164f10500/volumes" Nov 11 14:35:29 crc kubenswrapper[4842]: I1111 14:35:29.059413 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:35:29 crc kubenswrapper[4842]: E1111 14:35:29.060183 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:35:40 crc kubenswrapper[4842]: I1111 14:35:40.075775 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:35:40 crc kubenswrapper[4842]: E1111 14:35:40.077831 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:35:54 crc kubenswrapper[4842]: I1111 14:35:54.060085 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:35:54 crc kubenswrapper[4842]: E1111 14:35:54.060966 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.136546 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sp45s"] Nov 11 14:36:03 crc kubenswrapper[4842]: E1111 14:36:03.137488 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="extract-utilities" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.137503 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="extract-utilities" Nov 11 14:36:03 crc kubenswrapper[4842]: E1111 14:36:03.137526 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="registry-server" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.137535 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="registry-server" Nov 11 14:36:03 crc kubenswrapper[4842]: E1111 14:36:03.137556 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="extract-content" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.137563 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="extract-content" Nov 11 14:36:03 crc 
kubenswrapper[4842]: I1111 14:36:03.137780 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa7267a2-7fb5-4d6b-8ac0-a70164f10500" containerName="registry-server" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.142939 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.152769 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sp45s"] Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.194027 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4ncj\" (UniqueName: \"kubernetes.io/projected/995f20dd-567d-4375-9366-4d1d31bd2d58-kube-api-access-m4ncj\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.194175 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-catalog-content\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.194197 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-utilities\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.296276 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4ncj\" (UniqueName: \"kubernetes.io/projected/995f20dd-567d-4375-9366-4d1d31bd2d58-kube-api-access-m4ncj\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.296612 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-catalog-content\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.296630 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-utilities\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.297134 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-utilities\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.297552 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-catalog-content\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.331660 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4ncj\" (UniqueName: \"kubernetes.io/projected/995f20dd-567d-4375-9366-4d1d31bd2d58-kube-api-access-m4ncj\") pod \"redhat-operators-sp45s\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.472467 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.920736 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sp45s"] Nov 11 14:36:03 crc kubenswrapper[4842]: I1111 14:36:03.967994 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sp45s" event={"ID":"995f20dd-567d-4375-9366-4d1d31bd2d58","Type":"ContainerStarted","Data":"188e3ac3c36d95b306a17d612fa080598c729a9e9a1e73f2ddff5d0f0b526845"} Nov 11 14:36:04 crc kubenswrapper[4842]: I1111 14:36:04.979923 4842 generic.go:334] "Generic (PLEG): container finished" podID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerID="5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f" exitCode=0 Nov 11 14:36:04 crc kubenswrapper[4842]: I1111 14:36:04.980009 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sp45s" event={"ID":"995f20dd-567d-4375-9366-4d1d31bd2d58","Type":"ContainerDied","Data":"5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f"} Nov 11 14:36:04 crc kubenswrapper[4842]: I1111 14:36:04.982941 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:36:06 crc kubenswrapper[4842]: I1111 14:36:06.009287 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sp45s" event={"ID":"995f20dd-567d-4375-9366-4d1d31bd2d58","Type":"ContainerStarted","Data":"73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855"} Nov 11 14:36:06 crc kubenswrapper[4842]: I1111 14:36:06.059651 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:36:06 crc kubenswrapper[4842]: E1111 14:36:06.060230 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:36:10 crc kubenswrapper[4842]: I1111 14:36:10.058173 4842 generic.go:334] "Generic (PLEG): container finished" podID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerID="73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855" exitCode=0 Nov 11 14:36:10 crc kubenswrapper[4842]: I1111 14:36:10.069570 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sp45s" 
event={"ID":"995f20dd-567d-4375-9366-4d1d31bd2d58","Type":"ContainerDied","Data":"73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855"} Nov 11 14:36:11 crc kubenswrapper[4842]: I1111 14:36:11.069154 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sp45s" event={"ID":"995f20dd-567d-4375-9366-4d1d31bd2d58","Type":"ContainerStarted","Data":"3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977"} Nov 11 14:36:11 crc kubenswrapper[4842]: I1111 14:36:11.096740 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sp45s" podStartSLOduration=2.401839346 podStartE2EDuration="8.096720721s" podCreationTimestamp="2025-11-11 14:36:03 +0000 UTC" firstStartedPulling="2025-11-11 14:36:04.982674024 +0000 UTC m=+3975.642963643" lastFinishedPulling="2025-11-11 14:36:10.677555399 +0000 UTC m=+3981.337845018" observedRunningTime="2025-11-11 14:36:11.086342417 +0000 UTC m=+3981.746632046" watchObservedRunningTime="2025-11-11 14:36:11.096720721 +0000 UTC m=+3981.757010340" Nov 11 14:36:13 crc kubenswrapper[4842]: I1111 14:36:13.472832 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:13 crc kubenswrapper[4842]: I1111 14:36:13.473467 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:14 crc kubenswrapper[4842]: I1111 14:36:14.517411 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-sp45s" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="registry-server" probeResult="failure" output=< Nov 11 14:36:14 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 14:36:14 crc kubenswrapper[4842]: > Nov 11 14:36:19 crc kubenswrapper[4842]: I1111 14:36:19.059658 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:36:19 crc kubenswrapper[4842]: E1111 14:36:19.060521 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:36:23 crc kubenswrapper[4842]: I1111 14:36:23.521977 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:23 crc kubenswrapper[4842]: I1111 14:36:23.576614 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:23 crc kubenswrapper[4842]: I1111 14:36:23.755668 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sp45s"] Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.195921 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sp45s" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="registry-server" containerID="cri-o://3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977" gracePeriod=2 Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.678599 4842 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.851899 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-utilities\") pod \"995f20dd-567d-4375-9366-4d1d31bd2d58\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.852612 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4ncj\" (UniqueName: \"kubernetes.io/projected/995f20dd-567d-4375-9366-4d1d31bd2d58-kube-api-access-m4ncj\") pod \"995f20dd-567d-4375-9366-4d1d31bd2d58\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.852832 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-catalog-content\") pod \"995f20dd-567d-4375-9366-4d1d31bd2d58\" (UID: \"995f20dd-567d-4375-9366-4d1d31bd2d58\") " Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.853272 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-utilities" (OuterVolumeSpecName: "utilities") pod "995f20dd-567d-4375-9366-4d1d31bd2d58" (UID: "995f20dd-567d-4375-9366-4d1d31bd2d58"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.853642 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.861294 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/995f20dd-567d-4375-9366-4d1d31bd2d58-kube-api-access-m4ncj" (OuterVolumeSpecName: "kube-api-access-m4ncj") pod "995f20dd-567d-4375-9366-4d1d31bd2d58" (UID: "995f20dd-567d-4375-9366-4d1d31bd2d58"). InnerVolumeSpecName "kube-api-access-m4ncj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.955456 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4ncj\" (UniqueName: \"kubernetes.io/projected/995f20dd-567d-4375-9366-4d1d31bd2d58-kube-api-access-m4ncj\") on node \"crc\" DevicePath \"\"" Nov 11 14:36:25 crc kubenswrapper[4842]: I1111 14:36:25.956899 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "995f20dd-567d-4375-9366-4d1d31bd2d58" (UID: "995f20dd-567d-4375-9366-4d1d31bd2d58"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.056225 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/995f20dd-567d-4375-9366-4d1d31bd2d58-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.206792 4842 generic.go:334] "Generic (PLEG): container finished" podID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerID="3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977" exitCode=0 Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.206830 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sp45s" event={"ID":"995f20dd-567d-4375-9366-4d1d31bd2d58","Type":"ContainerDied","Data":"3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977"} Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.206852 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sp45s" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.206867 4842 scope.go:117] "RemoveContainer" containerID="3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.206855 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sp45s" event={"ID":"995f20dd-567d-4375-9366-4d1d31bd2d58","Type":"ContainerDied","Data":"188e3ac3c36d95b306a17d612fa080598c729a9e9a1e73f2ddff5d0f0b526845"} Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.231370 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sp45s"] Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.239630 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-sp45s"] Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.240310 4842 scope.go:117] "RemoveContainer" containerID="73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.262675 4842 scope.go:117] "RemoveContainer" containerID="5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.320707 4842 scope.go:117] "RemoveContainer" containerID="3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977" Nov 11 14:36:26 crc kubenswrapper[4842]: E1111 14:36:26.321527 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977\": container with ID starting with 3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977 not found: ID does not exist" containerID="3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.321582 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977"} err="failed to get container status \"3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977\": rpc error: code = NotFound desc = could not find container \"3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977\": container with ID starting with 3c9a46fbf689bfc27b915c3e3ffa1b30758ade80529bb451828da0a4a8760977 not found: ID does not exist" Nov 11 14:36:26 crc 
kubenswrapper[4842]: I1111 14:36:26.321622 4842 scope.go:117] "RemoveContainer" containerID="73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855" Nov 11 14:36:26 crc kubenswrapper[4842]: E1111 14:36:26.321914 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855\": container with ID starting with 73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855 not found: ID does not exist" containerID="73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.321945 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855"} err="failed to get container status \"73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855\": rpc error: code = NotFound desc = could not find container \"73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855\": container with ID starting with 73ef0d2cf98859223fe74db29adc2b78da0b6a47df46547f78c683a7423e3855 not found: ID does not exist" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.321963 4842 scope.go:117] "RemoveContainer" containerID="5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f" Nov 11 14:36:26 crc kubenswrapper[4842]: E1111 14:36:26.322570 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f\": container with ID starting with 5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f not found: ID does not exist" containerID="5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f" Nov 11 14:36:26 crc kubenswrapper[4842]: I1111 14:36:26.322607 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f"} err="failed to get container status \"5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f\": rpc error: code = NotFound desc = could not find container \"5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f\": container with ID starting with 5ef81de3e7129c2acf4b80f8e3a3c42b653361e2c1e73f5115da9f96b4efda5f not found: ID does not exist" Nov 11 14:36:28 crc kubenswrapper[4842]: I1111 14:36:28.070563 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" path="/var/lib/kubelet/pods/995f20dd-567d-4375-9366-4d1d31bd2d58/volumes" Nov 11 14:36:34 crc kubenswrapper[4842]: I1111 14:36:34.059185 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:36:34 crc kubenswrapper[4842]: E1111 14:36:34.059851 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:36:48 crc kubenswrapper[4842]: I1111 14:36:48.059644 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" 
Nov 11 14:36:48 crc kubenswrapper[4842]: E1111 14:36:48.060556 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:37:00 crc kubenswrapper[4842]: I1111 14:37:00.066929 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:37:00 crc kubenswrapper[4842]: E1111 14:37:00.068788 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:37:12 crc kubenswrapper[4842]: I1111 14:37:12.059520 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:37:12 crc kubenswrapper[4842]: E1111 14:37:12.060145 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:37:26 crc kubenswrapper[4842]: I1111 14:37:26.062335 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:37:26 crc kubenswrapper[4842]: E1111 14:37:26.063778 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:37:38 crc kubenswrapper[4842]: I1111 14:37:38.060496 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:37:38 crc kubenswrapper[4842]: E1111 14:37:38.061331 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:37:49 crc kubenswrapper[4842]: I1111 14:37:49.060004 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:37:49 crc kubenswrapper[4842]: E1111 14:37:49.060825 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:38:00 crc kubenswrapper[4842]: I1111 14:38:00.067569 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:38:00 crc kubenswrapper[4842]: E1111 14:38:00.068812 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:38:13 crc kubenswrapper[4842]: I1111 14:38:13.059607 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:38:13 crc kubenswrapper[4842]: E1111 14:38:13.060530 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:38:28 crc kubenswrapper[4842]: I1111 14:38:28.059178 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:38:28 crc kubenswrapper[4842]: E1111 14:38:28.059970 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:38:40 crc kubenswrapper[4842]: I1111 14:38:40.067603 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:38:40 crc kubenswrapper[4842]: E1111 14:38:40.068401 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:38:51 crc kubenswrapper[4842]: I1111 14:38:51.059471 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:38:51 crc kubenswrapper[4842]: I1111 14:38:51.621694 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"175a1fbfdc9a69c45e62c5f9a1d287142ffd0fe23f567047e963d820600ee149"} Nov 11 14:39:13 crc kubenswrapper[4842]: 
E1111 14:39:13.052927 4842 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.155:53866->38.102.83.155:44429: write tcp 38.102.83.155:53866->38.102.83.155:44429: write: broken pipe Nov 11 14:39:26 crc kubenswrapper[4842]: E1111 14:39:26.670611 4842 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.155:50512->38.102.83.155:44429: write tcp 38.102.83.155:50512->38.102.83.155:44429: write: broken pipe Nov 11 14:41:14 crc kubenswrapper[4842]: I1111 14:41:14.960873 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:41:14 crc kubenswrapper[4842]: I1111 14:41:14.961514 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:41:44 crc kubenswrapper[4842]: I1111 14:41:44.961234 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:41:44 crc kubenswrapper[4842]: I1111 14:41:44.961804 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:42:14 crc kubenswrapper[4842]: I1111 14:42:14.961643 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:42:14 crc kubenswrapper[4842]: I1111 14:42:14.962359 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:42:14 crc kubenswrapper[4842]: I1111 14:42:14.962420 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:42:14 crc kubenswrapper[4842]: I1111 14:42:14.963516 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"175a1fbfdc9a69c45e62c5f9a1d287142ffd0fe23f567047e963d820600ee149"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:42:14 crc kubenswrapper[4842]: I1111 14:42:14.963617 4842 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://175a1fbfdc9a69c45e62c5f9a1d287142ffd0fe23f567047e963d820600ee149" gracePeriod=600 Nov 11 14:42:15 crc kubenswrapper[4842]: I1111 14:42:15.554288 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="175a1fbfdc9a69c45e62c5f9a1d287142ffd0fe23f567047e963d820600ee149" exitCode=0 Nov 11 14:42:15 crc kubenswrapper[4842]: I1111 14:42:15.554363 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"175a1fbfdc9a69c45e62c5f9a1d287142ffd0fe23f567047e963d820600ee149"} Nov 11 14:42:15 crc kubenswrapper[4842]: I1111 14:42:15.554671 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6"} Nov 11 14:42:15 crc kubenswrapper[4842]: I1111 14:42:15.554699 4842 scope.go:117] "RemoveContainer" containerID="b7bcae3113559f3a08ab08a72d1edcaeb1cc38c7ea9ab864cfdf4afcda5ea088" Nov 11 14:44:44 crc kubenswrapper[4842]: I1111 14:44:44.960791 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:44:44 crc kubenswrapper[4842]: I1111 14:44:44.961420 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.153709 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr"] Nov 11 14:45:00 crc kubenswrapper[4842]: E1111 14:45:00.155016 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="extract-content" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.155038 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="extract-content" Nov 11 14:45:00 crc kubenswrapper[4842]: E1111 14:45:00.155130 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="registry-server" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.155142 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="registry-server" Nov 11 14:45:00 crc kubenswrapper[4842]: E1111 14:45:00.155186 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="extract-utilities" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.155196 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="extract-utilities" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 
14:45:00.155453 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="995f20dd-567d-4375-9366-4d1d31bd2d58" containerName="registry-server" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.156384 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.160529 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.162302 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.180960 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr"] Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.257043 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fmzn\" (UniqueName: \"kubernetes.io/projected/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-kube-api-access-2fmzn\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.257603 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-config-volume\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.257855 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-secret-volume\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.359945 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fmzn\" (UniqueName: \"kubernetes.io/projected/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-kube-api-access-2fmzn\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.360073 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-config-volume\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.360153 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-secret-volume\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.361222 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-config-volume\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.384819 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fmzn\" (UniqueName: \"kubernetes.io/projected/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-kube-api-access-2fmzn\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.386236 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-secret-volume\") pod \"collect-profiles-29381205-xsmgr\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.491954 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:00 crc kubenswrapper[4842]: I1111 14:45:00.922725 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr"] Nov 11 14:45:01 crc kubenswrapper[4842]: I1111 14:45:01.090020 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" event={"ID":"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc","Type":"ContainerStarted","Data":"b5e4de3d3bec4939f8995f8ee07d7cc4a037f36fb466829dc1df40b255bd46b8"} Nov 11 14:45:01 crc kubenswrapper[4842]: I1111 14:45:01.111850 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" podStartSLOduration=1.111826749 podStartE2EDuration="1.111826749s" podCreationTimestamp="2025-11-11 14:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 14:45:01.106383658 +0000 UTC m=+4511.766673277" watchObservedRunningTime="2025-11-11 14:45:01.111826749 +0000 UTC m=+4511.772116368" Nov 11 14:45:02 crc kubenswrapper[4842]: I1111 14:45:02.101522 4842 generic.go:334] "Generic (PLEG): container finished" podID="cc6e3acc-fa5d-478b-9269-8b314ac3d8dc" containerID="0a5a6ba13bd35204a884909d8489e57e87fb047cf6c1619326686263bdb41a3c" exitCode=0 Nov 11 14:45:02 crc kubenswrapper[4842]: I1111 14:45:02.101586 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" event={"ID":"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc","Type":"ContainerDied","Data":"0a5a6ba13bd35204a884909d8489e57e87fb047cf6c1619326686263bdb41a3c"} Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.483699 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.529608 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-config-volume\") pod \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.529851 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-secret-volume\") pod \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.529905 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fmzn\" (UniqueName: \"kubernetes.io/projected/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-kube-api-access-2fmzn\") pod \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\" (UID: \"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc\") " Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.530750 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-config-volume" (OuterVolumeSpecName: "config-volume") pod "cc6e3acc-fa5d-478b-9269-8b314ac3d8dc" (UID: "cc6e3acc-fa5d-478b-9269-8b314ac3d8dc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.531749 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.537165 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-kube-api-access-2fmzn" (OuterVolumeSpecName: "kube-api-access-2fmzn") pod "cc6e3acc-fa5d-478b-9269-8b314ac3d8dc" (UID: "cc6e3acc-fa5d-478b-9269-8b314ac3d8dc"). InnerVolumeSpecName "kube-api-access-2fmzn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.537325 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cc6e3acc-fa5d-478b-9269-8b314ac3d8dc" (UID: "cc6e3acc-fa5d-478b-9269-8b314ac3d8dc"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.633598 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 14:45:03 crc kubenswrapper[4842]: I1111 14:45:03.633846 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fmzn\" (UniqueName: \"kubernetes.io/projected/cc6e3acc-fa5d-478b-9269-8b314ac3d8dc-kube-api-access-2fmzn\") on node \"crc\" DevicePath \"\"" Nov 11 14:45:04 crc kubenswrapper[4842]: I1111 14:45:04.119679 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" event={"ID":"cc6e3acc-fa5d-478b-9269-8b314ac3d8dc","Type":"ContainerDied","Data":"b5e4de3d3bec4939f8995f8ee07d7cc4a037f36fb466829dc1df40b255bd46b8"} Nov 11 14:45:04 crc kubenswrapper[4842]: I1111 14:45:04.119725 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5e4de3d3bec4939f8995f8ee07d7cc4a037f36fb466829dc1df40b255bd46b8" Nov 11 14:45:04 crc kubenswrapper[4842]: I1111 14:45:04.119813 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381205-xsmgr" Nov 11 14:45:04 crc kubenswrapper[4842]: I1111 14:45:04.561788 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq"] Nov 11 14:45:04 crc kubenswrapper[4842]: I1111 14:45:04.570992 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381160-k85hq"] Nov 11 14:45:06 crc kubenswrapper[4842]: I1111 14:45:06.077265 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33473766-ea57-42af-8f97-f111ef36b159" path="/var/lib/kubelet/pods/33473766-ea57-42af-8f97-f111ef36b159/volumes" Nov 11 14:45:14 crc kubenswrapper[4842]: I1111 14:45:14.961446 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:45:14 crc kubenswrapper[4842]: I1111 14:45:14.961977 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:45:44 crc kubenswrapper[4842]: I1111 14:45:44.961390 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:45:44 crc kubenswrapper[4842]: I1111 14:45:44.961921 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:45:44 crc kubenswrapper[4842]: 
I1111 14:45:44.961956 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:45:44 crc kubenswrapper[4842]: I1111 14:45:44.962714 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:45:44 crc kubenswrapper[4842]: I1111 14:45:44.962759 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" gracePeriod=600 Nov 11 14:45:45 crc kubenswrapper[4842]: E1111 14:45:45.086800 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:45:45 crc kubenswrapper[4842]: I1111 14:45:45.530173 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" exitCode=0 Nov 11 14:45:45 crc kubenswrapper[4842]: I1111 14:45:45.530230 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6"} Nov 11 14:45:45 crc kubenswrapper[4842]: I1111 14:45:45.530577 4842 scope.go:117] "RemoveContainer" containerID="175a1fbfdc9a69c45e62c5f9a1d287142ffd0fe23f567047e963d820600ee149" Nov 11 14:45:45 crc kubenswrapper[4842]: I1111 14:45:45.531090 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:45:45 crc kubenswrapper[4842]: E1111 14:45:45.531440 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.112633 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-frfph"] Nov 11 14:45:57 crc kubenswrapper[4842]: E1111 14:45:57.115474 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc6e3acc-fa5d-478b-9269-8b314ac3d8dc" containerName="collect-profiles" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.115501 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc6e3acc-fa5d-478b-9269-8b314ac3d8dc" containerName="collect-profiles" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 
14:45:57.115861 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc6e3acc-fa5d-478b-9269-8b314ac3d8dc" containerName="collect-profiles" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.118387 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.121955 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-frfph"] Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.208556 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nff5\" (UniqueName: \"kubernetes.io/projected/28b27442-542d-47d3-87c8-e923844a511c-kube-api-access-7nff5\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.208941 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-catalog-content\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.209086 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-utilities\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.311414 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nff5\" (UniqueName: \"kubernetes.io/projected/28b27442-542d-47d3-87c8-e923844a511c-kube-api-access-7nff5\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.311466 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-catalog-content\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.311537 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-utilities\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.312166 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-utilities\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.312441 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-catalog-content\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.335279 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nff5\" (UniqueName: \"kubernetes.io/projected/28b27442-542d-47d3-87c8-e923844a511c-kube-api-access-7nff5\") pod \"community-operators-frfph\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.447057 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:45:57 crc kubenswrapper[4842]: I1111 14:45:57.979825 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-frfph"] Nov 11 14:45:58 crc kubenswrapper[4842]: I1111 14:45:58.691006 4842 generic.go:334] "Generic (PLEG): container finished" podID="28b27442-542d-47d3-87c8-e923844a511c" containerID="b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69" exitCode=0 Nov 11 14:45:58 crc kubenswrapper[4842]: I1111 14:45:58.691050 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-frfph" event={"ID":"28b27442-542d-47d3-87c8-e923844a511c","Type":"ContainerDied","Data":"b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69"} Nov 11 14:45:58 crc kubenswrapper[4842]: I1111 14:45:58.691076 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-frfph" event={"ID":"28b27442-542d-47d3-87c8-e923844a511c","Type":"ContainerStarted","Data":"b346ddce82062bd89b68482f9c9d59809d1cef940a0bf880b3434d1ffeaff42f"} Nov 11 14:45:58 crc kubenswrapper[4842]: I1111 14:45:58.693253 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:45:59 crc kubenswrapper[4842]: I1111 14:45:59.702058 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-frfph" event={"ID":"28b27442-542d-47d3-87c8-e923844a511c","Type":"ContainerStarted","Data":"28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7"} Nov 11 14:46:00 crc kubenswrapper[4842]: I1111 14:46:00.711247 4842 generic.go:334] "Generic (PLEG): container finished" podID="28b27442-542d-47d3-87c8-e923844a511c" containerID="28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7" exitCode=0 Nov 11 14:46:00 crc kubenswrapper[4842]: I1111 14:46:00.711298 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-frfph" event={"ID":"28b27442-542d-47d3-87c8-e923844a511c","Type":"ContainerDied","Data":"28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7"} Nov 11 14:46:01 crc kubenswrapper[4842]: I1111 14:46:01.059218 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:46:01 crc kubenswrapper[4842]: E1111 14:46:01.059462 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:46:01 crc kubenswrapper[4842]: I1111 14:46:01.726375 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-frfph" event={"ID":"28b27442-542d-47d3-87c8-e923844a511c","Type":"ContainerStarted","Data":"058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34"} Nov 11 14:46:01 crc kubenswrapper[4842]: I1111 14:46:01.746586 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-frfph" podStartSLOduration=2.19585132 podStartE2EDuration="4.746563107s" podCreationTimestamp="2025-11-11 14:45:57 +0000 UTC" firstStartedPulling="2025-11-11 14:45:58.692904572 +0000 UTC m=+4569.353194191" lastFinishedPulling="2025-11-11 14:46:01.243616359 +0000 UTC m=+4571.903905978" observedRunningTime="2025-11-11 14:46:01.744469572 +0000 UTC m=+4572.404759231" watchObservedRunningTime="2025-11-11 14:46:01.746563107 +0000 UTC m=+4572.406852726" Nov 11 14:46:01 crc kubenswrapper[4842]: I1111 14:46:01.764915 4842 scope.go:117] "RemoveContainer" containerID="7dfd97f5b25ad997eb0875203e2b586cd0b3e9a4cabaa1b6f088b7512c3e2bc7" Nov 11 14:46:07 crc kubenswrapper[4842]: I1111 14:46:07.447653 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:46:07 crc kubenswrapper[4842]: I1111 14:46:07.449278 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:46:07 crc kubenswrapper[4842]: I1111 14:46:07.512015 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:46:07 crc kubenswrapper[4842]: I1111 14:46:07.821853 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:46:07 crc kubenswrapper[4842]: I1111 14:46:07.866558 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-frfph"] Nov 11 14:46:09 crc kubenswrapper[4842]: I1111 14:46:09.799306 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-frfph" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="registry-server" containerID="cri-o://058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34" gracePeriod=2 Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.327940 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.425223 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-utilities\") pod \"28b27442-542d-47d3-87c8-e923844a511c\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.425415 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nff5\" (UniqueName: \"kubernetes.io/projected/28b27442-542d-47d3-87c8-e923844a511c-kube-api-access-7nff5\") pod \"28b27442-542d-47d3-87c8-e923844a511c\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.425544 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-catalog-content\") pod \"28b27442-542d-47d3-87c8-e923844a511c\" (UID: \"28b27442-542d-47d3-87c8-e923844a511c\") " Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.426594 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-utilities" (OuterVolumeSpecName: "utilities") pod "28b27442-542d-47d3-87c8-e923844a511c" (UID: "28b27442-542d-47d3-87c8-e923844a511c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.434040 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b27442-542d-47d3-87c8-e923844a511c-kube-api-access-7nff5" (OuterVolumeSpecName: "kube-api-access-7nff5") pod "28b27442-542d-47d3-87c8-e923844a511c" (UID: "28b27442-542d-47d3-87c8-e923844a511c"). InnerVolumeSpecName "kube-api-access-7nff5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.479857 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28b27442-542d-47d3-87c8-e923844a511c" (UID: "28b27442-542d-47d3-87c8-e923844a511c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.528110 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.528431 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nff5\" (UniqueName: \"kubernetes.io/projected/28b27442-542d-47d3-87c8-e923844a511c-kube-api-access-7nff5\") on node \"crc\" DevicePath \"\"" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.528447 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b27442-542d-47d3-87c8-e923844a511c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.810168 4842 generic.go:334] "Generic (PLEG): container finished" podID="28b27442-542d-47d3-87c8-e923844a511c" containerID="058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34" exitCode=0 Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.810212 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-frfph" event={"ID":"28b27442-542d-47d3-87c8-e923844a511c","Type":"ContainerDied","Data":"058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34"} Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.810240 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-frfph" event={"ID":"28b27442-542d-47d3-87c8-e923844a511c","Type":"ContainerDied","Data":"b346ddce82062bd89b68482f9c9d59809d1cef940a0bf880b3434d1ffeaff42f"} Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.810258 4842 scope.go:117] "RemoveContainer" containerID="058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.810393 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-frfph" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.841357 4842 scope.go:117] "RemoveContainer" containerID="28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.845522 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-frfph"] Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.855406 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-frfph"] Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.862136 4842 scope.go:117] "RemoveContainer" containerID="b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.920455 4842 scope.go:117] "RemoveContainer" containerID="058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34" Nov 11 14:46:10 crc kubenswrapper[4842]: E1111 14:46:10.920892 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34\": container with ID starting with 058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34 not found: ID does not exist" containerID="058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.920928 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34"} err="failed to get container status \"058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34\": rpc error: code = NotFound desc = could not find container \"058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34\": container with ID starting with 058f15c149b5841306045fb6335decd1454b1bcdaaa5490acec5f0559bfd6b34 not found: ID does not exist" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.920952 4842 scope.go:117] "RemoveContainer" containerID="28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7" Nov 11 14:46:10 crc kubenswrapper[4842]: E1111 14:46:10.921213 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7\": container with ID starting with 28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7 not found: ID does not exist" containerID="28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.921236 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7"} err="failed to get container status \"28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7\": rpc error: code = NotFound desc = could not find container \"28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7\": container with ID starting with 28cd950a46811e8f92151b2f3af04b1d553fd59a362a2fecaf86df1a032366e7 not found: ID does not exist" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.921254 4842 scope.go:117] "RemoveContainer" containerID="b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69" Nov 11 14:46:10 crc kubenswrapper[4842]: E1111 14:46:10.921536 4842 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69\": container with ID starting with b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69 not found: ID does not exist" containerID="b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69" Nov 11 14:46:10 crc kubenswrapper[4842]: I1111 14:46:10.921559 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69"} err="failed to get container status \"b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69\": rpc error: code = NotFound desc = could not find container \"b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69\": container with ID starting with b0098ea350ab6c97290ac6d06a2788834797116e7b8304b18766ea74261a5a69 not found: ID does not exist" Nov 11 14:46:12 crc kubenswrapper[4842]: I1111 14:46:12.059571 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:46:12 crc kubenswrapper[4842]: E1111 14:46:12.060044 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:46:12 crc kubenswrapper[4842]: I1111 14:46:12.073523 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28b27442-542d-47d3-87c8-e923844a511c" path="/var/lib/kubelet/pods/28b27442-542d-47d3-87c8-e923844a511c/volumes" Nov 11 14:46:27 crc kubenswrapper[4842]: I1111 14:46:27.059395 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:46:27 crc kubenswrapper[4842]: E1111 14:46:27.060273 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:46:40 crc kubenswrapper[4842]: I1111 14:46:40.066027 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:46:40 crc kubenswrapper[4842]: E1111 14:46:40.066741 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:46:53 crc kubenswrapper[4842]: I1111 14:46:53.059849 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:46:53 crc kubenswrapper[4842]: E1111 14:46:53.060647 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.004769 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fd75x"] Nov 11 14:46:58 crc kubenswrapper[4842]: E1111 14:46:58.006001 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="registry-server" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.006016 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="registry-server" Nov 11 14:46:58 crc kubenswrapper[4842]: E1111 14:46:58.006060 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="extract-content" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.006066 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="extract-content" Nov 11 14:46:58 crc kubenswrapper[4842]: E1111 14:46:58.006076 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="extract-utilities" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.006082 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="extract-utilities" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.006303 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b27442-542d-47d3-87c8-e923844a511c" containerName="registry-server" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.008214 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.025744 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fd75x"] Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.049404 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjtrf\" (UniqueName: \"kubernetes.io/projected/228c0a2f-ad7c-41e4-ad91-332682236803-kube-api-access-sjtrf\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.049487 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-catalog-content\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.049742 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-utilities\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.151287 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-catalog-content\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.151629 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-utilities\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.151714 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjtrf\" (UniqueName: \"kubernetes.io/projected/228c0a2f-ad7c-41e4-ad91-332682236803-kube-api-access-sjtrf\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.152444 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-catalog-content\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.152604 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-utilities\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.181935 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sjtrf\" (UniqueName: \"kubernetes.io/projected/228c0a2f-ad7c-41e4-ad91-332682236803-kube-api-access-sjtrf\") pod \"redhat-operators-fd75x\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.338969 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:46:58 crc kubenswrapper[4842]: I1111 14:46:58.842577 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fd75x"] Nov 11 14:46:59 crc kubenswrapper[4842]: I1111 14:46:59.268608 4842 generic.go:334] "Generic (PLEG): container finished" podID="228c0a2f-ad7c-41e4-ad91-332682236803" containerID="072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c" exitCode=0 Nov 11 14:46:59 crc kubenswrapper[4842]: I1111 14:46:59.268662 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fd75x" event={"ID":"228c0a2f-ad7c-41e4-ad91-332682236803","Type":"ContainerDied","Data":"072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c"} Nov 11 14:46:59 crc kubenswrapper[4842]: I1111 14:46:59.268936 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fd75x" event={"ID":"228c0a2f-ad7c-41e4-ad91-332682236803","Type":"ContainerStarted","Data":"1cadf325ba59e27091541f8ebdd157ad9111f3705cf7c84728f1342ff0901867"} Nov 11 14:47:01 crc kubenswrapper[4842]: I1111 14:47:01.290378 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fd75x" event={"ID":"228c0a2f-ad7c-41e4-ad91-332682236803","Type":"ContainerStarted","Data":"a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5"} Nov 11 14:47:05 crc kubenswrapper[4842]: I1111 14:47:05.324866 4842 generic.go:334] "Generic (PLEG): container finished" podID="228c0a2f-ad7c-41e4-ad91-332682236803" containerID="a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5" exitCode=0 Nov 11 14:47:05 crc kubenswrapper[4842]: I1111 14:47:05.324970 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fd75x" event={"ID":"228c0a2f-ad7c-41e4-ad91-332682236803","Type":"ContainerDied","Data":"a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5"} Nov 11 14:47:06 crc kubenswrapper[4842]: I1111 14:47:06.340905 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fd75x" event={"ID":"228c0a2f-ad7c-41e4-ad91-332682236803","Type":"ContainerStarted","Data":"231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f"} Nov 11 14:47:06 crc kubenswrapper[4842]: I1111 14:47:06.365194 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fd75x" podStartSLOduration=2.853371905 podStartE2EDuration="9.36516643s" podCreationTimestamp="2025-11-11 14:46:57 +0000 UTC" firstStartedPulling="2025-11-11 14:46:59.270795002 +0000 UTC m=+4629.931084611" lastFinishedPulling="2025-11-11 14:47:05.782589517 +0000 UTC m=+4636.442879136" observedRunningTime="2025-11-11 14:47:06.362744974 +0000 UTC m=+4637.023034613" watchObservedRunningTime="2025-11-11 14:47:06.36516643 +0000 UTC m=+4637.025456049" Nov 11 14:47:08 crc kubenswrapper[4842]: I1111 14:47:08.065910 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:47:08 
crc kubenswrapper[4842]: E1111 14:47:08.067569 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:47:08 crc kubenswrapper[4842]: I1111 14:47:08.339836 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:47:08 crc kubenswrapper[4842]: I1111 14:47:08.339890 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:47:09 crc kubenswrapper[4842]: I1111 14:47:09.396392 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fd75x" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="registry-server" probeResult="failure" output=< Nov 11 14:47:09 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 14:47:09 crc kubenswrapper[4842]: > Nov 11 14:47:19 crc kubenswrapper[4842]: I1111 14:47:19.060254 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:47:19 crc kubenswrapper[4842]: E1111 14:47:19.061083 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:47:19 crc kubenswrapper[4842]: I1111 14:47:19.386249 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fd75x" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="registry-server" probeResult="failure" output=< Nov 11 14:47:19 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 14:47:19 crc kubenswrapper[4842]: > Nov 11 14:47:28 crc kubenswrapper[4842]: I1111 14:47:28.386038 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:47:28 crc kubenswrapper[4842]: I1111 14:47:28.437902 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:47:29 crc kubenswrapper[4842]: I1111 14:47:29.208234 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fd75x"] Nov 11 14:47:29 crc kubenswrapper[4842]: I1111 14:47:29.834555 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fd75x" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="registry-server" containerID="cri-o://231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f" gracePeriod=2 Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.302558 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.346843 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-utilities\") pod \"228c0a2f-ad7c-41e4-ad91-332682236803\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.346937 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-catalog-content\") pod \"228c0a2f-ad7c-41e4-ad91-332682236803\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.347127 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjtrf\" (UniqueName: \"kubernetes.io/projected/228c0a2f-ad7c-41e4-ad91-332682236803-kube-api-access-sjtrf\") pod \"228c0a2f-ad7c-41e4-ad91-332682236803\" (UID: \"228c0a2f-ad7c-41e4-ad91-332682236803\") " Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.348085 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-utilities" (OuterVolumeSpecName: "utilities") pod "228c0a2f-ad7c-41e4-ad91-332682236803" (UID: "228c0a2f-ad7c-41e4-ad91-332682236803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.350130 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.358798 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/228c0a2f-ad7c-41e4-ad91-332682236803-kube-api-access-sjtrf" (OuterVolumeSpecName: "kube-api-access-sjtrf") pod "228c0a2f-ad7c-41e4-ad91-332682236803" (UID: "228c0a2f-ad7c-41e4-ad91-332682236803"). InnerVolumeSpecName "kube-api-access-sjtrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.434444 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "228c0a2f-ad7c-41e4-ad91-332682236803" (UID: "228c0a2f-ad7c-41e4-ad91-332682236803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.452613 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjtrf\" (UniqueName: \"kubernetes.io/projected/228c0a2f-ad7c-41e4-ad91-332682236803-kube-api-access-sjtrf\") on node \"crc\" DevicePath \"\"" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.452649 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/228c0a2f-ad7c-41e4-ad91-332682236803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.847642 4842 generic.go:334] "Generic (PLEG): container finished" podID="228c0a2f-ad7c-41e4-ad91-332682236803" containerID="231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f" exitCode=0 Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.847695 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fd75x" event={"ID":"228c0a2f-ad7c-41e4-ad91-332682236803","Type":"ContainerDied","Data":"231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f"} Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.847727 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fd75x" event={"ID":"228c0a2f-ad7c-41e4-ad91-332682236803","Type":"ContainerDied","Data":"1cadf325ba59e27091541f8ebdd157ad9111f3705cf7c84728f1342ff0901867"} Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.847738 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fd75x" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.847748 4842 scope.go:117] "RemoveContainer" containerID="231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.874461 4842 scope.go:117] "RemoveContainer" containerID="a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.898630 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fd75x"] Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.900322 4842 scope.go:117] "RemoveContainer" containerID="072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.915468 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fd75x"] Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.948319 4842 scope.go:117] "RemoveContainer" containerID="231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f" Nov 11 14:47:30 crc kubenswrapper[4842]: E1111 14:47:30.948649 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f\": container with ID starting with 231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f not found: ID does not exist" containerID="231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.948678 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f"} err="failed to get container status \"231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f\": 
rpc error: code = NotFound desc = could not find container \"231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f\": container with ID starting with 231a465e84b04c9b48e1df035d5ed450bdef1bdde403ada1f0a7502fdc458f8f not found: ID does not exist" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.948699 4842 scope.go:117] "RemoveContainer" containerID="a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5" Nov 11 14:47:30 crc kubenswrapper[4842]: E1111 14:47:30.948922 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5\": container with ID starting with a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5 not found: ID does not exist" containerID="a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.948949 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5"} err="failed to get container status \"a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5\": rpc error: code = NotFound desc = could not find container \"a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5\": container with ID starting with a9b843ad642ca9b86b0d468a1164c814ef7a10a2589c8b1ddb8aa7f618a670d5 not found: ID does not exist" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.948961 4842 scope.go:117] "RemoveContainer" containerID="072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c" Nov 11 14:47:30 crc kubenswrapper[4842]: E1111 14:47:30.949196 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c\": container with ID starting with 072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c not found: ID does not exist" containerID="072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c" Nov 11 14:47:30 crc kubenswrapper[4842]: I1111 14:47:30.949294 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c"} err="failed to get container status \"072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c\": rpc error: code = NotFound desc = could not find container \"072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c\": container with ID starting with 072893325cdc099474797e6a1f980bdf3c7b537d364e3c79d5f6d5b85834653c not found: ID does not exist" Nov 11 14:47:32 crc kubenswrapper[4842]: I1111 14:47:32.059311 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:47:32 crc kubenswrapper[4842]: E1111 14:47:32.061012 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:47:32 crc kubenswrapper[4842]: I1111 14:47:32.076464 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="228c0a2f-ad7c-41e4-ad91-332682236803" path="/var/lib/kubelet/pods/228c0a2f-ad7c-41e4-ad91-332682236803/volumes" Nov 11 14:47:44 crc kubenswrapper[4842]: I1111 14:47:44.060378 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:47:44 crc kubenswrapper[4842]: E1111 14:47:44.061642 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:47:55 crc kubenswrapper[4842]: I1111 14:47:55.059610 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:47:55 crc kubenswrapper[4842]: E1111 14:47:55.060410 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:48:07 crc kubenswrapper[4842]: I1111 14:48:07.059646 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:48:07 crc kubenswrapper[4842]: E1111 14:48:07.060771 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:48:21 crc kubenswrapper[4842]: I1111 14:48:21.058747 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:48:21 crc kubenswrapper[4842]: E1111 14:48:21.059524 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:48:35 crc kubenswrapper[4842]: I1111 14:48:35.058663 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:48:35 crc kubenswrapper[4842]: E1111 14:48:35.059462 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:48:49 crc kubenswrapper[4842]: I1111 14:48:49.059699 4842 
scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:48:49 crc kubenswrapper[4842]: E1111 14:48:49.060347 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:49:00 crc kubenswrapper[4842]: I1111 14:49:00.071324 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:49:00 crc kubenswrapper[4842]: E1111 14:49:00.072392 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:49:13 crc kubenswrapper[4842]: I1111 14:49:13.059551 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:49:13 crc kubenswrapper[4842]: E1111 14:49:13.060903 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:49:25 crc kubenswrapper[4842]: I1111 14:49:25.972165 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kjjfl"] Nov 11 14:49:25 crc kubenswrapper[4842]: E1111 14:49:25.974384 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="extract-utilities" Nov 11 14:49:25 crc kubenswrapper[4842]: I1111 14:49:25.974415 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="extract-utilities" Nov 11 14:49:25 crc kubenswrapper[4842]: E1111 14:49:25.974474 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="extract-content" Nov 11 14:49:25 crc kubenswrapper[4842]: I1111 14:49:25.974484 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="extract-content" Nov 11 14:49:25 crc kubenswrapper[4842]: E1111 14:49:25.974506 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="registry-server" Nov 11 14:49:25 crc kubenswrapper[4842]: I1111 14:49:25.974514 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="registry-server" Nov 11 14:49:25 crc kubenswrapper[4842]: I1111 14:49:25.974798 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="228c0a2f-ad7c-41e4-ad91-332682236803" containerName="registry-server" Nov 11 14:49:25 crc 
kubenswrapper[4842]: I1111 14:49:25.976877 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:25 crc kubenswrapper[4842]: I1111 14:49:25.984596 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjjfl"] Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.182950 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-utilities\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.183744 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwv9v\" (UniqueName: \"kubernetes.io/projected/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-kube-api-access-dwv9v\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.183833 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-catalog-content\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.285906 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-utilities\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.286050 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwv9v\" (UniqueName: \"kubernetes.io/projected/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-kube-api-access-dwv9v\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.286116 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-catalog-content\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.286521 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-catalog-content\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.286522 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-utilities\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc 
kubenswrapper[4842]: I1111 14:49:26.309375 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwv9v\" (UniqueName: \"kubernetes.io/projected/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-kube-api-access-dwv9v\") pod \"redhat-marketplace-kjjfl\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:26 crc kubenswrapper[4842]: I1111 14:49:26.604635 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:27 crc kubenswrapper[4842]: I1111 14:49:27.076018 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:49:27 crc kubenswrapper[4842]: E1111 14:49:27.076518 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:49:27 crc kubenswrapper[4842]: I1111 14:49:27.110141 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjjfl"] Nov 11 14:49:28 crc kubenswrapper[4842]: I1111 14:49:28.149891 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerID="73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e" exitCode=0 Nov 11 14:49:28 crc kubenswrapper[4842]: I1111 14:49:28.149962 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjjfl" event={"ID":"d8b66ca7-4a4c-4c99-a284-f089966eaeb1","Type":"ContainerDied","Data":"73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e"} Nov 11 14:49:28 crc kubenswrapper[4842]: I1111 14:49:28.150264 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjjfl" event={"ID":"d8b66ca7-4a4c-4c99-a284-f089966eaeb1","Type":"ContainerStarted","Data":"930c7d2103fa23462edfb49154397af2980134c4206f5221e1f483d2d6e75135"} Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.173462 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerID="b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685" exitCode=0 Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.173512 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjjfl" event={"ID":"d8b66ca7-4a4c-4c99-a284-f089966eaeb1","Type":"ContainerDied","Data":"b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685"} Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.739042 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qqxkm"] Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.742816 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.753089 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qqxkm"] Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.799518 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-catalog-content\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.799577 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9l89\" (UniqueName: \"kubernetes.io/projected/334e2292-62a5-40e7-bdca-bd720d30d3a3-kube-api-access-w9l89\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.799603 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-utilities\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.901217 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-catalog-content\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.901283 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9l89\" (UniqueName: \"kubernetes.io/projected/334e2292-62a5-40e7-bdca-bd720d30d3a3-kube-api-access-w9l89\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.901309 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-utilities\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.902149 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-utilities\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.902237 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-catalog-content\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:30 crc kubenswrapper[4842]: I1111 14:49:30.934171 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w9l89\" (UniqueName: \"kubernetes.io/projected/334e2292-62a5-40e7-bdca-bd720d30d3a3-kube-api-access-w9l89\") pod \"certified-operators-qqxkm\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:31 crc kubenswrapper[4842]: I1111 14:49:31.110427 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:31 crc kubenswrapper[4842]: I1111 14:49:31.230676 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjjfl" event={"ID":"d8b66ca7-4a4c-4c99-a284-f089966eaeb1","Type":"ContainerStarted","Data":"ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279"} Nov 11 14:49:31 crc kubenswrapper[4842]: I1111 14:49:31.279938 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kjjfl" podStartSLOduration=3.7878210660000002 podStartE2EDuration="6.279921278s" podCreationTimestamp="2025-11-11 14:49:25 +0000 UTC" firstStartedPulling="2025-11-11 14:49:28.153312145 +0000 UTC m=+4778.813601764" lastFinishedPulling="2025-11-11 14:49:30.645412367 +0000 UTC m=+4781.305701976" observedRunningTime="2025-11-11 14:49:31.274559589 +0000 UTC m=+4781.934849238" watchObservedRunningTime="2025-11-11 14:49:31.279921278 +0000 UTC m=+4781.940210897" Nov 11 14:49:31 crc kubenswrapper[4842]: I1111 14:49:31.676431 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qqxkm"] Nov 11 14:49:32 crc kubenswrapper[4842]: I1111 14:49:32.241142 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qqxkm" event={"ID":"334e2292-62a5-40e7-bdca-bd720d30d3a3","Type":"ContainerStarted","Data":"61e50398a8effba313d2cf6e93fbe4fc3be1bdf468b73e48667d4d88e91c0028"} Nov 11 14:49:33 crc kubenswrapper[4842]: I1111 14:49:33.252917 4842 generic.go:334] "Generic (PLEG): container finished" podID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerID="1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78" exitCode=0 Nov 11 14:49:33 crc kubenswrapper[4842]: I1111 14:49:33.252962 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qqxkm" event={"ID":"334e2292-62a5-40e7-bdca-bd720d30d3a3","Type":"ContainerDied","Data":"1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78"} Nov 11 14:49:35 crc kubenswrapper[4842]: I1111 14:49:35.275984 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qqxkm" event={"ID":"334e2292-62a5-40e7-bdca-bd720d30d3a3","Type":"ContainerStarted","Data":"89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8"} Nov 11 14:49:36 crc kubenswrapper[4842]: I1111 14:49:36.286559 4842 generic.go:334] "Generic (PLEG): container finished" podID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerID="89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8" exitCode=0 Nov 11 14:49:36 crc kubenswrapper[4842]: I1111 14:49:36.286842 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qqxkm" event={"ID":"334e2292-62a5-40e7-bdca-bd720d30d3a3","Type":"ContainerDied","Data":"89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8"} Nov 11 14:49:36 crc kubenswrapper[4842]: I1111 14:49:36.605659 4842 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:36 crc kubenswrapper[4842]: I1111 14:49:36.606438 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:36 crc kubenswrapper[4842]: I1111 14:49:36.663756 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:37 crc kubenswrapper[4842]: I1111 14:49:37.299942 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qqxkm" event={"ID":"334e2292-62a5-40e7-bdca-bd720d30d3a3","Type":"ContainerStarted","Data":"d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4"} Nov 11 14:49:37 crc kubenswrapper[4842]: I1111 14:49:37.334990 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qqxkm" podStartSLOduration=3.864513543 podStartE2EDuration="7.334966171s" podCreationTimestamp="2025-11-11 14:49:30 +0000 UTC" firstStartedPulling="2025-11-11 14:49:33.255925639 +0000 UTC m=+4783.916215258" lastFinishedPulling="2025-11-11 14:49:36.726378267 +0000 UTC m=+4787.386667886" observedRunningTime="2025-11-11 14:49:37.313741502 +0000 UTC m=+4787.974031131" watchObservedRunningTime="2025-11-11 14:49:37.334966171 +0000 UTC m=+4787.995255780" Nov 11 14:49:37 crc kubenswrapper[4842]: I1111 14:49:37.351697 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:38 crc kubenswrapper[4842]: I1111 14:49:38.342723 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjjfl"] Nov 11 14:49:40 crc kubenswrapper[4842]: I1111 14:49:40.325312 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kjjfl" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="registry-server" containerID="cri-o://ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279" gracePeriod=2 Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.115368 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.115659 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.169784 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.302419 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.308560 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwv9v\" (UniqueName: \"kubernetes.io/projected/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-kube-api-access-dwv9v\") pod \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.308748 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-catalog-content\") pod \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.308855 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-utilities\") pod \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\" (UID: \"d8b66ca7-4a4c-4c99-a284-f089966eaeb1\") " Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.309993 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-utilities" (OuterVolumeSpecName: "utilities") pod "d8b66ca7-4a4c-4c99-a284-f089966eaeb1" (UID: "d8b66ca7-4a4c-4c99-a284-f089966eaeb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.313618 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-kube-api-access-dwv9v" (OuterVolumeSpecName: "kube-api-access-dwv9v") pod "d8b66ca7-4a4c-4c99-a284-f089966eaeb1" (UID: "d8b66ca7-4a4c-4c99-a284-f089966eaeb1"). InnerVolumeSpecName "kube-api-access-dwv9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.326536 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8b66ca7-4a4c-4c99-a284-f089966eaeb1" (UID: "d8b66ca7-4a4c-4c99-a284-f089966eaeb1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.341322 4842 generic.go:334] "Generic (PLEG): container finished" podID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerID="ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279" exitCode=0 Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.342339 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjjfl" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.343006 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjjfl" event={"ID":"d8b66ca7-4a4c-4c99-a284-f089966eaeb1","Type":"ContainerDied","Data":"ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279"} Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.343174 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjjfl" event={"ID":"d8b66ca7-4a4c-4c99-a284-f089966eaeb1","Type":"ContainerDied","Data":"930c7d2103fa23462edfb49154397af2980134c4206f5221e1f483d2d6e75135"} Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.343211 4842 scope.go:117] "RemoveContainer" containerID="ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.378171 4842 scope.go:117] "RemoveContainer" containerID="b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.393749 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.400446 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjjfl"] Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.411168 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjjfl"] Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.412487 4842 scope.go:117] "RemoveContainer" containerID="73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.414069 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwv9v\" (UniqueName: \"kubernetes.io/projected/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-kube-api-access-dwv9v\") on node \"crc\" DevicePath \"\"" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.414085 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.414164 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b66ca7-4a4c-4c99-a284-f089966eaeb1-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.473661 4842 scope.go:117] "RemoveContainer" containerID="ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279" Nov 11 14:49:41 crc kubenswrapper[4842]: E1111 14:49:41.474013 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279\": container with ID starting with ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279 not found: ID does not exist" containerID="ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.474047 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279"} err="failed to get container status 
\"ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279\": rpc error: code = NotFound desc = could not find container \"ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279\": container with ID starting with ce765bab1c556351534c1522b1983453b26e2cfe72dad591de0b9bee7b7d7279 not found: ID does not exist" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.474071 4842 scope.go:117] "RemoveContainer" containerID="b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685" Nov 11 14:49:41 crc kubenswrapper[4842]: E1111 14:49:41.474277 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685\": container with ID starting with b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685 not found: ID does not exist" containerID="b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.474297 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685"} err="failed to get container status \"b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685\": rpc error: code = NotFound desc = could not find container \"b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685\": container with ID starting with b0b830af15ed1bac65182dfb5eea52c0799082c132ff7ac1c2961a86b2a3a685 not found: ID does not exist" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.474309 4842 scope.go:117] "RemoveContainer" containerID="73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e" Nov 11 14:49:41 crc kubenswrapper[4842]: E1111 14:49:41.474461 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e\": container with ID starting with 73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e not found: ID does not exist" containerID="73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e" Nov 11 14:49:41 crc kubenswrapper[4842]: I1111 14:49:41.474481 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e"} err="failed to get container status \"73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e\": rpc error: code = NotFound desc = could not find container \"73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e\": container with ID starting with 73d92d05cfebb4a762943257cc8dd49eb15c25d615911be31fbb6d9d3a98f60e not found: ID does not exist" Nov 11 14:49:42 crc kubenswrapper[4842]: I1111 14:49:42.072640 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:49:42 crc kubenswrapper[4842]: E1111 14:49:42.073050 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:49:42 crc kubenswrapper[4842]: I1111 14:49:42.082646 4842 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" path="/var/lib/kubelet/pods/d8b66ca7-4a4c-4c99-a284-f089966eaeb1/volumes" Nov 11 14:49:43 crc kubenswrapper[4842]: I1111 14:49:43.516223 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qqxkm"] Nov 11 14:49:43 crc kubenswrapper[4842]: I1111 14:49:43.517148 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qqxkm" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerName="registry-server" containerID="cri-o://d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4" gracePeriod=2 Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.022086 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.181492 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-utilities\") pod \"334e2292-62a5-40e7-bdca-bd720d30d3a3\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.181669 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9l89\" (UniqueName: \"kubernetes.io/projected/334e2292-62a5-40e7-bdca-bd720d30d3a3-kube-api-access-w9l89\") pod \"334e2292-62a5-40e7-bdca-bd720d30d3a3\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.181694 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-catalog-content\") pod \"334e2292-62a5-40e7-bdca-bd720d30d3a3\" (UID: \"334e2292-62a5-40e7-bdca-bd720d30d3a3\") " Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.182639 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-utilities" (OuterVolumeSpecName: "utilities") pod "334e2292-62a5-40e7-bdca-bd720d30d3a3" (UID: "334e2292-62a5-40e7-bdca-bd720d30d3a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.226149 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "334e2292-62a5-40e7-bdca-bd720d30d3a3" (UID: "334e2292-62a5-40e7-bdca-bd720d30d3a3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.282904 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.283254 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/334e2292-62a5-40e7-bdca-bd720d30d3a3-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.375496 4842 generic.go:334] "Generic (PLEG): container finished" podID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerID="d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4" exitCode=0 Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.375546 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qqxkm" event={"ID":"334e2292-62a5-40e7-bdca-bd720d30d3a3","Type":"ContainerDied","Data":"d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4"} Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.375585 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qqxkm" event={"ID":"334e2292-62a5-40e7-bdca-bd720d30d3a3","Type":"ContainerDied","Data":"61e50398a8effba313d2cf6e93fbe4fc3be1bdf468b73e48667d4d88e91c0028"} Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.375618 4842 scope.go:117] "RemoveContainer" containerID="d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.375619 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qqxkm" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.401338 4842 scope.go:117] "RemoveContainer" containerID="89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.658770 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/334e2292-62a5-40e7-bdca-bd720d30d3a3-kube-api-access-w9l89" (OuterVolumeSpecName: "kube-api-access-w9l89") pod "334e2292-62a5-40e7-bdca-bd720d30d3a3" (UID: "334e2292-62a5-40e7-bdca-bd720d30d3a3"). InnerVolumeSpecName "kube-api-access-w9l89". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.674181 4842 scope.go:117] "RemoveContainer" containerID="1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.689510 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9l89\" (UniqueName: \"kubernetes.io/projected/334e2292-62a5-40e7-bdca-bd720d30d3a3-kube-api-access-w9l89\") on node \"crc\" DevicePath \"\"" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.758320 4842 scope.go:117] "RemoveContainer" containerID="d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4" Nov 11 14:49:44 crc kubenswrapper[4842]: E1111 14:49:44.758795 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4\": container with ID starting with d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4 not found: ID does not exist" containerID="d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.758863 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4"} err="failed to get container status \"d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4\": rpc error: code = NotFound desc = could not find container \"d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4\": container with ID starting with d720a997042fb58b9651ccc09949355b37f4f984c83369cba729fc7385fb72f4 not found: ID does not exist" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.758919 4842 scope.go:117] "RemoveContainer" containerID="89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8" Nov 11 14:49:44 crc kubenswrapper[4842]: E1111 14:49:44.759279 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8\": container with ID starting with 89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8 not found: ID does not exist" containerID="89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.759314 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8"} err="failed to get container status \"89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8\": rpc error: code = NotFound desc = could not find container \"89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8\": container with ID starting with 89c43a362256d918fa1752ce7d809d7e511dd95d8d752f9f2249acacf8e5a4f8 not found: ID does not exist" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.759338 4842 scope.go:117] "RemoveContainer" containerID="1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78" Nov 11 14:49:44 crc kubenswrapper[4842]: E1111 14:49:44.759696 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78\": container with ID starting with 1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78 not found: ID does not 
exist" containerID="1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.759749 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78"} err="failed to get container status \"1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78\": rpc error: code = NotFound desc = could not find container \"1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78\": container with ID starting with 1bc4a9da85235efdfd56bc9349c47b40003374038c20c5da525245868928ba78 not found: ID does not exist" Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.815905 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qqxkm"] Nov 11 14:49:44 crc kubenswrapper[4842]: I1111 14:49:44.823026 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qqxkm"] Nov 11 14:49:46 crc kubenswrapper[4842]: I1111 14:49:46.073851 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" path="/var/lib/kubelet/pods/334e2292-62a5-40e7-bdca-bd720d30d3a3/volumes" Nov 11 14:49:54 crc kubenswrapper[4842]: I1111 14:49:54.064311 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:49:54 crc kubenswrapper[4842]: E1111 14:49:54.065165 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:50:07 crc kubenswrapper[4842]: I1111 14:50:07.059713 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:50:07 crc kubenswrapper[4842]: E1111 14:50:07.060808 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:50:18 crc kubenswrapper[4842]: I1111 14:50:18.060090 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:50:18 crc kubenswrapper[4842]: E1111 14:50:18.060905 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:50:30 crc kubenswrapper[4842]: I1111 14:50:30.066221 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:50:30 crc kubenswrapper[4842]: E1111 14:50:30.067045 4842 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:50:44 crc kubenswrapper[4842]: I1111 14:50:44.059898 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:50:44 crc kubenswrapper[4842]: E1111 14:50:44.061406 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:50:55 crc kubenswrapper[4842]: I1111 14:50:55.060170 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:50:56 crc kubenswrapper[4842]: I1111 14:50:56.063278 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"22113e9475bc873a390d524a56312529e33a7680ebab825ea3e6a4eda586b0cd"} Nov 11 14:53:14 crc kubenswrapper[4842]: I1111 14:53:14.961131 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:53:14 crc kubenswrapper[4842]: I1111 14:53:14.962297 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:53:44 crc kubenswrapper[4842]: I1111 14:53:44.961149 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:53:44 crc kubenswrapper[4842]: I1111 14:53:44.961734 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:54:14 crc kubenswrapper[4842]: I1111 14:54:14.961144 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:54:14 crc kubenswrapper[4842]: I1111 14:54:14.962076 4842 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:54:14 crc kubenswrapper[4842]: I1111 14:54:14.962218 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:54:14 crc kubenswrapper[4842]: I1111 14:54:14.963275 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"22113e9475bc873a390d524a56312529e33a7680ebab825ea3e6a4eda586b0cd"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:54:14 crc kubenswrapper[4842]: I1111 14:54:14.963351 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://22113e9475bc873a390d524a56312529e33a7680ebab825ea3e6a4eda586b0cd" gracePeriod=600 Nov 11 14:54:16 crc kubenswrapper[4842]: I1111 14:54:16.017012 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="22113e9475bc873a390d524a56312529e33a7680ebab825ea3e6a4eda586b0cd" exitCode=0 Nov 11 14:54:16 crc kubenswrapper[4842]: I1111 14:54:16.017089 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"22113e9475bc873a390d524a56312529e33a7680ebab825ea3e6a4eda586b0cd"} Nov 11 14:54:16 crc kubenswrapper[4842]: I1111 14:54:16.017617 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c"} Nov 11 14:54:16 crc kubenswrapper[4842]: I1111 14:54:16.017650 4842 scope.go:117] "RemoveContainer" containerID="c2d17a3a39d603bc2c13a6e31be99d018783abefa88652758e0559711178f5a6" Nov 11 14:56:44 crc kubenswrapper[4842]: I1111 14:56:44.960806 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:56:44 crc kubenswrapper[4842]: I1111 14:56:44.961507 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.166452 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7g4fr"] Nov 11 14:56:50 crc kubenswrapper[4842]: E1111 14:56:50.170052 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" 
containerName="registry-server" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.170321 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerName="registry-server" Nov 11 14:56:50 crc kubenswrapper[4842]: E1111 14:56:50.170574 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="extract-utilities" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.170759 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="extract-utilities" Nov 11 14:56:50 crc kubenswrapper[4842]: E1111 14:56:50.170964 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerName="extract-content" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.171150 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerName="extract-content" Nov 11 14:56:50 crc kubenswrapper[4842]: E1111 14:56:50.171336 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="registry-server" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.171480 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="registry-server" Nov 11 14:56:50 crc kubenswrapper[4842]: E1111 14:56:50.171648 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerName="extract-utilities" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.171770 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerName="extract-utilities" Nov 11 14:56:50 crc kubenswrapper[4842]: E1111 14:56:50.171905 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="extract-content" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.172056 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="extract-content" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.172755 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="334e2292-62a5-40e7-bdca-bd720d30d3a3" containerName="registry-server" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.173178 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8b66ca7-4a4c-4c99-a284-f089966eaeb1" containerName="registry-server" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.177841 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7g4fr"] Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.177966 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.356625 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxfl2\" (UniqueName: \"kubernetes.io/projected/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-kube-api-access-pxfl2\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.356979 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-catalog-content\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.357061 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-utilities\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.459228 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxfl2\" (UniqueName: \"kubernetes.io/projected/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-kube-api-access-pxfl2\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.459306 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-catalog-content\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.459336 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-utilities\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.459865 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-utilities\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.460410 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-catalog-content\") pod \"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.481872 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxfl2\" (UniqueName: \"kubernetes.io/projected/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-kube-api-access-pxfl2\") pod 
\"community-operators-7g4fr\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:50 crc kubenswrapper[4842]: I1111 14:56:50.507223 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:56:51 crc kubenswrapper[4842]: I1111 14:56:51.004749 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7g4fr"] Nov 11 14:56:51 crc kubenswrapper[4842]: I1111 14:56:51.612395 4842 generic.go:334] "Generic (PLEG): container finished" podID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerID="45cffe49f56fadce1c66e6e41c9a21ceb1b01fa8437c76d89202b60d2e3938f4" exitCode=0 Nov 11 14:56:51 crc kubenswrapper[4842]: I1111 14:56:51.612438 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7g4fr" event={"ID":"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb","Type":"ContainerDied","Data":"45cffe49f56fadce1c66e6e41c9a21ceb1b01fa8437c76d89202b60d2e3938f4"} Nov 11 14:56:51 crc kubenswrapper[4842]: I1111 14:56:51.612710 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7g4fr" event={"ID":"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb","Type":"ContainerStarted","Data":"5b3935851b696279c50bde0ce319c1c95ed6c207b0a5b74e1b1d152a97d116e4"} Nov 11 14:56:51 crc kubenswrapper[4842]: I1111 14:56:51.614602 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 14:56:53 crc kubenswrapper[4842]: I1111 14:56:53.632645 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7g4fr" event={"ID":"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb","Type":"ContainerStarted","Data":"b678dba58ca37ca7be5c67333f6aadc3738fd7be9ebead35a55195b0ded2992c"} Nov 11 14:56:54 crc kubenswrapper[4842]: I1111 14:56:54.642456 4842 generic.go:334] "Generic (PLEG): container finished" podID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerID="b678dba58ca37ca7be5c67333f6aadc3738fd7be9ebead35a55195b0ded2992c" exitCode=0 Nov 11 14:56:54 crc kubenswrapper[4842]: I1111 14:56:54.642750 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7g4fr" event={"ID":"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb","Type":"ContainerDied","Data":"b678dba58ca37ca7be5c67333f6aadc3738fd7be9ebead35a55195b0ded2992c"} Nov 11 14:56:55 crc kubenswrapper[4842]: I1111 14:56:55.653623 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7g4fr" event={"ID":"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb","Type":"ContainerStarted","Data":"bfa83b074cd9801f3a6a112e86b7e8e9a49f5d66dc037d4745916652368ccc51"} Nov 11 14:56:55 crc kubenswrapper[4842]: I1111 14:56:55.667405 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7g4fr" podStartSLOduration=2.205322831 podStartE2EDuration="5.667387757s" podCreationTimestamp="2025-11-11 14:56:50 +0000 UTC" firstStartedPulling="2025-11-11 14:56:51.614356266 +0000 UTC m=+5222.274645875" lastFinishedPulling="2025-11-11 14:56:55.076421182 +0000 UTC m=+5225.736710801" observedRunningTime="2025-11-11 14:56:55.666508449 +0000 UTC m=+5226.326798068" watchObservedRunningTime="2025-11-11 14:56:55.667387757 +0000 UTC m=+5226.327677376" Nov 11 14:57:00 crc kubenswrapper[4842]: I1111 14:57:00.510496 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:57:00 crc kubenswrapper[4842]: I1111 14:57:00.513417 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:57:01 crc kubenswrapper[4842]: I1111 14:57:01.096822 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:57:01 crc kubenswrapper[4842]: I1111 14:57:01.160166 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:57:01 crc kubenswrapper[4842]: I1111 14:57:01.334135 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7g4fr"] Nov 11 14:57:02 crc kubenswrapper[4842]: I1111 14:57:02.723046 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7g4fr" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="registry-server" containerID="cri-o://bfa83b074cd9801f3a6a112e86b7e8e9a49f5d66dc037d4745916652368ccc51" gracePeriod=2 Nov 11 14:57:03 crc kubenswrapper[4842]: I1111 14:57:03.735266 4842 generic.go:334] "Generic (PLEG): container finished" podID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerID="bfa83b074cd9801f3a6a112e86b7e8e9a49f5d66dc037d4745916652368ccc51" exitCode=0 Nov 11 14:57:03 crc kubenswrapper[4842]: I1111 14:57:03.735654 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7g4fr" event={"ID":"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb","Type":"ContainerDied","Data":"bfa83b074cd9801f3a6a112e86b7e8e9a49f5d66dc037d4745916652368ccc51"} Nov 11 14:57:03 crc kubenswrapper[4842]: I1111 14:57:03.956874 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.087174 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-utilities\") pod \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.088007 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxfl2\" (UniqueName: \"kubernetes.io/projected/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-kube-api-access-pxfl2\") pod \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.088031 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-catalog-content\") pod \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\" (UID: \"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb\") " Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.088639 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-utilities" (OuterVolumeSpecName: "utilities") pod "cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" (UID: "cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.094807 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-kube-api-access-pxfl2" (OuterVolumeSpecName: "kube-api-access-pxfl2") pod "cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" (UID: "cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb"). InnerVolumeSpecName "kube-api-access-pxfl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.131588 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" (UID: "cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.189915 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.189953 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxfl2\" (UniqueName: \"kubernetes.io/projected/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-kube-api-access-pxfl2\") on node \"crc\" DevicePath \"\"" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.189969 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.750545 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7g4fr" event={"ID":"cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb","Type":"ContainerDied","Data":"5b3935851b696279c50bde0ce319c1c95ed6c207b0a5b74e1b1d152a97d116e4"} Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.751117 4842 scope.go:117] "RemoveContainer" containerID="bfa83b074cd9801f3a6a112e86b7e8e9a49f5d66dc037d4745916652368ccc51" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.750662 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7g4fr" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.780051 4842 scope.go:117] "RemoveContainer" containerID="b678dba58ca37ca7be5c67333f6aadc3738fd7be9ebead35a55195b0ded2992c" Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.793387 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7g4fr"] Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.802487 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7g4fr"] Nov 11 14:57:04 crc kubenswrapper[4842]: I1111 14:57:04.813694 4842 scope.go:117] "RemoveContainer" containerID="45cffe49f56fadce1c66e6e41c9a21ceb1b01fa8437c76d89202b60d2e3938f4" Nov 11 14:57:06 crc kubenswrapper[4842]: I1111 14:57:06.073580 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" path="/var/lib/kubelet/pods/cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb/volumes" Nov 11 14:57:14 crc kubenswrapper[4842]: I1111 14:57:14.960905 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:57:14 crc kubenswrapper[4842]: I1111 14:57:14.961804 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:57:44 crc kubenswrapper[4842]: I1111 14:57:44.961295 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 14:57:44 crc kubenswrapper[4842]: I1111 14:57:44.961948 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 14:57:44 crc kubenswrapper[4842]: I1111 14:57:44.962003 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 14:57:44 crc kubenswrapper[4842]: I1111 14:57:44.962906 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 14:57:44 crc kubenswrapper[4842]: I1111 14:57:44.962968 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" 
gracePeriod=600 Nov 11 14:57:45 crc kubenswrapper[4842]: E1111 14:57:45.087578 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:57:45 crc kubenswrapper[4842]: I1111 14:57:45.190443 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" exitCode=0 Nov 11 14:57:45 crc kubenswrapper[4842]: I1111 14:57:45.190515 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c"} Nov 11 14:57:45 crc kubenswrapper[4842]: I1111 14:57:45.190580 4842 scope.go:117] "RemoveContainer" containerID="22113e9475bc873a390d524a56312529e33a7680ebab825ea3e6a4eda586b0cd" Nov 11 14:57:45 crc kubenswrapper[4842]: I1111 14:57:45.191601 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:57:45 crc kubenswrapper[4842]: E1111 14:57:45.191933 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:57:56 crc kubenswrapper[4842]: I1111 14:57:56.059829 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:57:56 crc kubenswrapper[4842]: E1111 14:57:56.061156 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:58:07 crc kubenswrapper[4842]: I1111 14:58:07.059426 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:58:07 crc kubenswrapper[4842]: E1111 14:58:07.060585 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:58:20 crc kubenswrapper[4842]: I1111 14:58:20.066396 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:58:20 crc kubenswrapper[4842]: E1111 14:58:20.067072 4842 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.059888 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:58:34 crc kubenswrapper[4842]: E1111 14:58:34.060828 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.726722 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lvh8m"] Nov 11 14:58:34 crc kubenswrapper[4842]: E1111 14:58:34.727250 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="registry-server" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.727274 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="registry-server" Nov 11 14:58:34 crc kubenswrapper[4842]: E1111 14:58:34.727318 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="extract-content" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.727326 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="extract-content" Nov 11 14:58:34 crc kubenswrapper[4842]: E1111 14:58:34.727347 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="extract-utilities" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.727356 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="extract-utilities" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.727607 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd3f9ae0-2710-4702-8dde-c1aa7e4c87cb" containerName="registry-server" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.729551 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.770348 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lvh8m"] Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.872010 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-catalog-content\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.872167 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-utilities\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.872364 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtm7p\" (UniqueName: \"kubernetes.io/projected/3ded2799-d350-4804-9659-ce3d0167cffe-kube-api-access-jtm7p\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.974618 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtm7p\" (UniqueName: \"kubernetes.io/projected/3ded2799-d350-4804-9659-ce3d0167cffe-kube-api-access-jtm7p\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.974673 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-catalog-content\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.974724 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-utilities\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.975428 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-utilities\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:34 crc kubenswrapper[4842]: I1111 14:58:34.975433 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-catalog-content\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:35 crc kubenswrapper[4842]: I1111 14:58:35.001090 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jtm7p\" (UniqueName: \"kubernetes.io/projected/3ded2799-d350-4804-9659-ce3d0167cffe-kube-api-access-jtm7p\") pod \"redhat-operators-lvh8m\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:35 crc kubenswrapper[4842]: I1111 14:58:35.073248 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:35 crc kubenswrapper[4842]: I1111 14:58:35.623152 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lvh8m"] Nov 11 14:58:35 crc kubenswrapper[4842]: I1111 14:58:35.732597 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvh8m" event={"ID":"3ded2799-d350-4804-9659-ce3d0167cffe","Type":"ContainerStarted","Data":"1816f307ddbf35bbd9ad9ad03b44c1871635b1f37674e595c74a96b19bb61a53"} Nov 11 14:58:36 crc kubenswrapper[4842]: I1111 14:58:36.749270 4842 generic.go:334] "Generic (PLEG): container finished" podID="3ded2799-d350-4804-9659-ce3d0167cffe" containerID="a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd" exitCode=0 Nov 11 14:58:36 crc kubenswrapper[4842]: I1111 14:58:36.749343 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvh8m" event={"ID":"3ded2799-d350-4804-9659-ce3d0167cffe","Type":"ContainerDied","Data":"a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd"} Nov 11 14:58:37 crc kubenswrapper[4842]: I1111 14:58:37.761478 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvh8m" event={"ID":"3ded2799-d350-4804-9659-ce3d0167cffe","Type":"ContainerStarted","Data":"5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7"} Nov 11 14:58:41 crc kubenswrapper[4842]: I1111 14:58:41.808351 4842 generic.go:334] "Generic (PLEG): container finished" podID="3ded2799-d350-4804-9659-ce3d0167cffe" containerID="5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7" exitCode=0 Nov 11 14:58:41 crc kubenswrapper[4842]: I1111 14:58:41.808434 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvh8m" event={"ID":"3ded2799-d350-4804-9659-ce3d0167cffe","Type":"ContainerDied","Data":"5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7"} Nov 11 14:58:42 crc kubenswrapper[4842]: I1111 14:58:42.818430 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvh8m" event={"ID":"3ded2799-d350-4804-9659-ce3d0167cffe","Type":"ContainerStarted","Data":"49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76"} Nov 11 14:58:45 crc kubenswrapper[4842]: I1111 14:58:45.073921 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:45 crc kubenswrapper[4842]: I1111 14:58:45.074238 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:58:46 crc kubenswrapper[4842]: I1111 14:58:46.588991 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lvh8m" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="registry-server" probeResult="failure" output=< Nov 11 14:58:46 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 14:58:46 crc kubenswrapper[4842]: > Nov 11 14:58:47 crc 
kubenswrapper[4842]: I1111 14:58:47.059533 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:58:47 crc kubenswrapper[4842]: E1111 14:58:47.059891 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:58:56 crc kubenswrapper[4842]: I1111 14:58:56.319600 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lvh8m" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="registry-server" probeResult="failure" output=< Nov 11 14:58:56 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 14:58:56 crc kubenswrapper[4842]: > Nov 11 14:59:02 crc kubenswrapper[4842]: I1111 14:59:02.085796 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:59:02 crc kubenswrapper[4842]: E1111 14:59:02.087191 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:59:05 crc kubenswrapper[4842]: I1111 14:59:05.121379 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:59:05 crc kubenswrapper[4842]: I1111 14:59:05.140359 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lvh8m" podStartSLOduration=25.498477726 podStartE2EDuration="31.140338322s" podCreationTimestamp="2025-11-11 14:58:34 +0000 UTC" firstStartedPulling="2025-11-11 14:58:36.753684347 +0000 UTC m=+5327.413973996" lastFinishedPulling="2025-11-11 14:58:42.395544973 +0000 UTC m=+5333.055834592" observedRunningTime="2025-11-11 14:58:42.838291536 +0000 UTC m=+5333.498581165" watchObservedRunningTime="2025-11-11 14:59:05.140338322 +0000 UTC m=+5355.800627941" Nov 11 14:59:05 crc kubenswrapper[4842]: I1111 14:59:05.173738 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:59:05 crc kubenswrapper[4842]: I1111 14:59:05.932372 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lvh8m"] Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.046896 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lvh8m" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="registry-server" containerID="cri-o://49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76" gracePeriod=2 Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.549216 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.703249 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-catalog-content\") pod \"3ded2799-d350-4804-9659-ce3d0167cffe\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.703352 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtm7p\" (UniqueName: \"kubernetes.io/projected/3ded2799-d350-4804-9659-ce3d0167cffe-kube-api-access-jtm7p\") pod \"3ded2799-d350-4804-9659-ce3d0167cffe\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.703420 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-utilities\") pod \"3ded2799-d350-4804-9659-ce3d0167cffe\" (UID: \"3ded2799-d350-4804-9659-ce3d0167cffe\") " Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.704454 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-utilities" (OuterVolumeSpecName: "utilities") pod "3ded2799-d350-4804-9659-ce3d0167cffe" (UID: "3ded2799-d350-4804-9659-ce3d0167cffe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.709839 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ded2799-d350-4804-9659-ce3d0167cffe-kube-api-access-jtm7p" (OuterVolumeSpecName: "kube-api-access-jtm7p") pod "3ded2799-d350-4804-9659-ce3d0167cffe" (UID: "3ded2799-d350-4804-9659-ce3d0167cffe"). InnerVolumeSpecName "kube-api-access-jtm7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.786561 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ded2799-d350-4804-9659-ce3d0167cffe" (UID: "3ded2799-d350-4804-9659-ce3d0167cffe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.805682 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.805721 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtm7p\" (UniqueName: \"kubernetes.io/projected/3ded2799-d350-4804-9659-ce3d0167cffe-kube-api-access-jtm7p\") on node \"crc\" DevicePath \"\"" Nov 11 14:59:07 crc kubenswrapper[4842]: I1111 14:59:07.805732 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ded2799-d350-4804-9659-ce3d0167cffe-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.057228 4842 generic.go:334] "Generic (PLEG): container finished" podID="3ded2799-d350-4804-9659-ce3d0167cffe" containerID="49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76" exitCode=0 Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.057324 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lvh8m" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.057314 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvh8m" event={"ID":"3ded2799-d350-4804-9659-ce3d0167cffe","Type":"ContainerDied","Data":"49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76"} Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.057600 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvh8m" event={"ID":"3ded2799-d350-4804-9659-ce3d0167cffe","Type":"ContainerDied","Data":"1816f307ddbf35bbd9ad9ad03b44c1871635b1f37674e595c74a96b19bb61a53"} Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.057625 4842 scope.go:117] "RemoveContainer" containerID="49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.084454 4842 scope.go:117] "RemoveContainer" containerID="5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.104329 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lvh8m"] Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.114678 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lvh8m"] Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.126280 4842 scope.go:117] "RemoveContainer" containerID="a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.163798 4842 scope.go:117] "RemoveContainer" containerID="49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76" Nov 11 14:59:08 crc kubenswrapper[4842]: E1111 14:59:08.164394 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76\": container with ID starting with 49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76 not found: ID does not exist" containerID="49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.164440 4842 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76"} err="failed to get container status \"49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76\": rpc error: code = NotFound desc = could not find container \"49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76\": container with ID starting with 49d722bc5d21551217d16bd5eeaf6794f96a047fd9748013a7937efaad564f76 not found: ID does not exist" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.164469 4842 scope.go:117] "RemoveContainer" containerID="5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7" Nov 11 14:59:08 crc kubenswrapper[4842]: E1111 14:59:08.164887 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7\": container with ID starting with 5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7 not found: ID does not exist" containerID="5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.164936 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7"} err="failed to get container status \"5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7\": rpc error: code = NotFound desc = could not find container \"5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7\": container with ID starting with 5ca23f27af64ad0399ebe4bbe793e221d4b1aaf5bd4137ba9956ef19f22070d7 not found: ID does not exist" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.164965 4842 scope.go:117] "RemoveContainer" containerID="a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd" Nov 11 14:59:08 crc kubenswrapper[4842]: E1111 14:59:08.165458 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd\": container with ID starting with a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd not found: ID does not exist" containerID="a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd" Nov 11 14:59:08 crc kubenswrapper[4842]: I1111 14:59:08.165502 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd"} err="failed to get container status \"a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd\": rpc error: code = NotFound desc = could not find container \"a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd\": container with ID starting with a77ed0e3b0a1213b63f58e97205e6863974a11cabb7fa10a09696bcc2a5d68fd not found: ID does not exist" Nov 11 14:59:10 crc kubenswrapper[4842]: I1111 14:59:10.073396 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" path="/var/lib/kubelet/pods/3ded2799-d350-4804-9659-ce3d0167cffe/volumes" Nov 11 14:59:16 crc kubenswrapper[4842]: I1111 14:59:16.062162 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:59:16 crc kubenswrapper[4842]: E1111 14:59:16.063225 4842 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:59:30 crc kubenswrapper[4842]: I1111 14:59:30.066225 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:59:30 crc kubenswrapper[4842]: E1111 14:59:30.067799 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:59:43 crc kubenswrapper[4842]: I1111 14:59:43.059443 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:59:43 crc kubenswrapper[4842]: E1111 14:59:43.060287 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 14:59:54 crc kubenswrapper[4842]: I1111 14:59:54.059621 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 14:59:54 crc kubenswrapper[4842]: E1111 14:59:54.060439 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.144455 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf"] Nov 11 15:00:00 crc kubenswrapper[4842]: E1111 15:00:00.145488 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="extract-utilities" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.145504 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="extract-utilities" Nov 11 15:00:00 crc kubenswrapper[4842]: E1111 15:00:00.145520 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="extract-content" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.145526 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="extract-content" Nov 11 15:00:00 crc kubenswrapper[4842]: E1111 15:00:00.145539 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" 
containerName="registry-server" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.145545 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="registry-server" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.145747 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ded2799-d350-4804-9659-ce3d0167cffe" containerName="registry-server" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.146556 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.149771 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.149892 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.165802 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf"] Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.343464 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa6ae95b-995e-48c2-8059-50208b6505f2-config-volume\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.343599 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa6ae95b-995e-48c2-8059-50208b6505f2-secret-volume\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.343653 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzncp\" (UniqueName: \"kubernetes.io/projected/aa6ae95b-995e-48c2-8059-50208b6505f2-kube-api-access-kzncp\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.446323 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa6ae95b-995e-48c2-8059-50208b6505f2-secret-volume\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.446470 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzncp\" (UniqueName: \"kubernetes.io/projected/aa6ae95b-995e-48c2-8059-50208b6505f2-kube-api-access-kzncp\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.446604 4842 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa6ae95b-995e-48c2-8059-50208b6505f2-config-volume\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.447423 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa6ae95b-995e-48c2-8059-50208b6505f2-config-volume\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.455208 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa6ae95b-995e-48c2-8059-50208b6505f2-secret-volume\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.466869 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzncp\" (UniqueName: \"kubernetes.io/projected/aa6ae95b-995e-48c2-8059-50208b6505f2-kube-api-access-kzncp\") pod \"collect-profiles-29381220-wgmjf\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.469183 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:00 crc kubenswrapper[4842]: I1111 15:00:00.915006 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf"] Nov 11 15:00:01 crc kubenswrapper[4842]: W1111 15:00:01.461702 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa6ae95b_995e_48c2_8059_50208b6505f2.slice/crio-50c8087abad827b8fd4d1c49d27b58f110a21defe5afab8bcd284e5a42cbb103 WatchSource:0}: Error finding container 50c8087abad827b8fd4d1c49d27b58f110a21defe5afab8bcd284e5a42cbb103: Status 404 returned error can't find the container with id 50c8087abad827b8fd4d1c49d27b58f110a21defe5afab8bcd284e5a42cbb103 Nov 11 15:00:01 crc kubenswrapper[4842]: I1111 15:00:01.597687 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" event={"ID":"aa6ae95b-995e-48c2-8059-50208b6505f2","Type":"ContainerStarted","Data":"50c8087abad827b8fd4d1c49d27b58f110a21defe5afab8bcd284e5a42cbb103"} Nov 11 15:00:02 crc kubenswrapper[4842]: I1111 15:00:02.607950 4842 generic.go:334] "Generic (PLEG): container finished" podID="aa6ae95b-995e-48c2-8059-50208b6505f2" containerID="42e667b23b5617ffc5442c154d766339b8769c29bd6d858c04dbfa0d489df14f" exitCode=0 Nov 11 15:00:02 crc kubenswrapper[4842]: I1111 15:00:02.608005 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" event={"ID":"aa6ae95b-995e-48c2-8059-50208b6505f2","Type":"ContainerDied","Data":"42e667b23b5617ffc5442c154d766339b8769c29bd6d858c04dbfa0d489df14f"} Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.023134 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.126716 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa6ae95b-995e-48c2-8059-50208b6505f2-config-volume\") pod \"aa6ae95b-995e-48c2-8059-50208b6505f2\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.126958 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa6ae95b-995e-48c2-8059-50208b6505f2-secret-volume\") pod \"aa6ae95b-995e-48c2-8059-50208b6505f2\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.127019 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzncp\" (UniqueName: \"kubernetes.io/projected/aa6ae95b-995e-48c2-8059-50208b6505f2-kube-api-access-kzncp\") pod \"aa6ae95b-995e-48c2-8059-50208b6505f2\" (UID: \"aa6ae95b-995e-48c2-8059-50208b6505f2\") " Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.128666 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa6ae95b-995e-48c2-8059-50208b6505f2-config-volume" (OuterVolumeSpecName: "config-volume") pod "aa6ae95b-995e-48c2-8059-50208b6505f2" (UID: "aa6ae95b-995e-48c2-8059-50208b6505f2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.132660 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa6ae95b-995e-48c2-8059-50208b6505f2-kube-api-access-kzncp" (OuterVolumeSpecName: "kube-api-access-kzncp") pod "aa6ae95b-995e-48c2-8059-50208b6505f2" (UID: "aa6ae95b-995e-48c2-8059-50208b6505f2"). InnerVolumeSpecName "kube-api-access-kzncp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.139207 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa6ae95b-995e-48c2-8059-50208b6505f2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "aa6ae95b-995e-48c2-8059-50208b6505f2" (UID: "aa6ae95b-995e-48c2-8059-50208b6505f2"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.229747 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzncp\" (UniqueName: \"kubernetes.io/projected/aa6ae95b-995e-48c2-8059-50208b6505f2-kube-api-access-kzncp\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.229775 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aa6ae95b-995e-48c2-8059-50208b6505f2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.229783 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aa6ae95b-995e-48c2-8059-50208b6505f2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.628245 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" event={"ID":"aa6ae95b-995e-48c2-8059-50208b6505f2","Type":"ContainerDied","Data":"50c8087abad827b8fd4d1c49d27b58f110a21defe5afab8bcd284e5a42cbb103"} Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.628540 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50c8087abad827b8fd4d1c49d27b58f110a21defe5afab8bcd284e5a42cbb103" Nov 11 15:00:04 crc kubenswrapper[4842]: I1111 15:00:04.628323 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381220-wgmjf" Nov 11 15:00:05 crc kubenswrapper[4842]: I1111 15:00:05.105247 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn"] Nov 11 15:00:05 crc kubenswrapper[4842]: I1111 15:00:05.115713 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381175-krcnn"] Nov 11 15:00:06 crc kubenswrapper[4842]: I1111 15:00:06.074229 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f392a4d4-8117-4b08-8e41-4d92d5d10a27" path="/var/lib/kubelet/pods/f392a4d4-8117-4b08-8e41-4d92d5d10a27/volumes" Nov 11 15:00:07 crc kubenswrapper[4842]: I1111 15:00:07.059466 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:00:07 crc kubenswrapper[4842]: E1111 15:00:07.060047 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:00:19 crc kubenswrapper[4842]: I1111 15:00:19.059410 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:00:19 crc kubenswrapper[4842]: E1111 15:00:19.061092 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.641415 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-59wrh"] Nov 11 15:00:27 crc kubenswrapper[4842]: E1111 15:00:27.642834 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa6ae95b-995e-48c2-8059-50208b6505f2" containerName="collect-profiles" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.642852 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa6ae95b-995e-48c2-8059-50208b6505f2" containerName="collect-profiles" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.643448 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa6ae95b-995e-48c2-8059-50208b6505f2" containerName="collect-profiles" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.646074 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.652758 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-59wrh"] Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.743812 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-catalog-content\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.744138 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrpkl\" (UniqueName: \"kubernetes.io/projected/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-kube-api-access-lrpkl\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.744374 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-utilities\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.846683 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-utilities\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.846801 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-catalog-content\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.846851 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrpkl\" (UniqueName: \"kubernetes.io/projected/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-kube-api-access-lrpkl\") 
pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.847197 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-utilities\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.847206 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-catalog-content\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.882228 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrpkl\" (UniqueName: \"kubernetes.io/projected/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-kube-api-access-lrpkl\") pod \"certified-operators-59wrh\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:27 crc kubenswrapper[4842]: I1111 15:00:27.967956 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:28 crc kubenswrapper[4842]: I1111 15:00:28.444786 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-59wrh"] Nov 11 15:00:28 crc kubenswrapper[4842]: W1111 15:00:28.448581 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd77ceb67_639a_4ea4_ac7b_3d6055abf69e.slice/crio-dd5fe6b2610e820848d1c9792ee674729d51d58a151b0cfaa0e5e52fd5e81c13 WatchSource:0}: Error finding container dd5fe6b2610e820848d1c9792ee674729d51d58a151b0cfaa0e5e52fd5e81c13: Status 404 returned error can't find the container with id dd5fe6b2610e820848d1c9792ee674729d51d58a151b0cfaa0e5e52fd5e81c13 Nov 11 15:00:28 crc kubenswrapper[4842]: I1111 15:00:28.852932 4842 generic.go:334] "Generic (PLEG): container finished" podID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerID="2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea" exitCode=0 Nov 11 15:00:28 crc kubenswrapper[4842]: I1111 15:00:28.853002 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-59wrh" event={"ID":"d77ceb67-639a-4ea4-ac7b-3d6055abf69e","Type":"ContainerDied","Data":"2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea"} Nov 11 15:00:28 crc kubenswrapper[4842]: I1111 15:00:28.853392 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-59wrh" event={"ID":"d77ceb67-639a-4ea4-ac7b-3d6055abf69e","Type":"ContainerStarted","Data":"dd5fe6b2610e820848d1c9792ee674729d51d58a151b0cfaa0e5e52fd5e81c13"} Nov 11 15:00:29 crc kubenswrapper[4842]: I1111 15:00:29.879423 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-59wrh" event={"ID":"d77ceb67-639a-4ea4-ac7b-3d6055abf69e","Type":"ContainerStarted","Data":"fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b"} Nov 11 15:00:30 crc kubenswrapper[4842]: I1111 15:00:30.897293 4842 generic.go:334] "Generic (PLEG): container 
finished" podID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerID="fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b" exitCode=0 Nov 11 15:00:30 crc kubenswrapper[4842]: I1111 15:00:30.897388 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-59wrh" event={"ID":"d77ceb67-639a-4ea4-ac7b-3d6055abf69e","Type":"ContainerDied","Data":"fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b"} Nov 11 15:00:31 crc kubenswrapper[4842]: I1111 15:00:31.908731 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-59wrh" event={"ID":"d77ceb67-639a-4ea4-ac7b-3d6055abf69e","Type":"ContainerStarted","Data":"78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109"} Nov 11 15:00:32 crc kubenswrapper[4842]: I1111 15:00:32.059197 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:00:32 crc kubenswrapper[4842]: E1111 15:00:32.059494 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:00:37 crc kubenswrapper[4842]: I1111 15:00:37.968514 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:37 crc kubenswrapper[4842]: I1111 15:00:37.969083 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:38 crc kubenswrapper[4842]: I1111 15:00:38.019180 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:38 crc kubenswrapper[4842]: I1111 15:00:38.043142 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-59wrh" podStartSLOduration=8.486740483 podStartE2EDuration="11.043119716s" podCreationTimestamp="2025-11-11 15:00:27 +0000 UTC" firstStartedPulling="2025-11-11 15:00:28.855525584 +0000 UTC m=+5439.515815193" lastFinishedPulling="2025-11-11 15:00:31.411904807 +0000 UTC m=+5442.072194426" observedRunningTime="2025-11-11 15:00:31.925538111 +0000 UTC m=+5442.585827750" watchObservedRunningTime="2025-11-11 15:00:38.043119716 +0000 UTC m=+5448.703409335" Nov 11 15:00:39 crc kubenswrapper[4842]: I1111 15:00:39.053808 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:39 crc kubenswrapper[4842]: I1111 15:00:39.107585 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-59wrh"] Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.700174 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qgwrl"] Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.702278 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.714329 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgwrl"] Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.812744 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-utilities\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.812831 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-catalog-content\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.813084 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgts8\" (UniqueName: \"kubernetes.io/projected/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-kube-api-access-kgts8\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.915147 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgts8\" (UniqueName: \"kubernetes.io/projected/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-kube-api-access-kgts8\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.915326 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-utilities\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.915371 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-catalog-content\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.915911 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-catalog-content\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.916515 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-utilities\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:40 crc kubenswrapper[4842]: I1111 15:00:40.936345 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-kgts8\" (UniqueName: \"kubernetes.io/projected/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-kube-api-access-kgts8\") pod \"redhat-marketplace-qgwrl\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.021342 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-59wrh" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="registry-server" containerID="cri-o://78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109" gracePeriod=2 Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.022190 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.517231 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgwrl"] Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.593318 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.732839 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrpkl\" (UniqueName: \"kubernetes.io/projected/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-kube-api-access-lrpkl\") pod \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.732990 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-utilities\") pod \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.733127 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-catalog-content\") pod \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\" (UID: \"d77ceb67-639a-4ea4-ac7b-3d6055abf69e\") " Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.733970 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-utilities" (OuterVolumeSpecName: "utilities") pod "d77ceb67-639a-4ea4-ac7b-3d6055abf69e" (UID: "d77ceb67-639a-4ea4-ac7b-3d6055abf69e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.742178 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-kube-api-access-lrpkl" (OuterVolumeSpecName: "kube-api-access-lrpkl") pod "d77ceb67-639a-4ea4-ac7b-3d6055abf69e" (UID: "d77ceb67-639a-4ea4-ac7b-3d6055abf69e"). InnerVolumeSpecName "kube-api-access-lrpkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.773300 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d77ceb67-639a-4ea4-ac7b-3d6055abf69e" (UID: "d77ceb67-639a-4ea4-ac7b-3d6055abf69e"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.835076 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.835130 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrpkl\" (UniqueName: \"kubernetes.io/projected/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-kube-api-access-lrpkl\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:41 crc kubenswrapper[4842]: I1111 15:00:41.835145 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77ceb67-639a-4ea4-ac7b-3d6055abf69e-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.030402 4842 generic.go:334] "Generic (PLEG): container finished" podID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerID="5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8" exitCode=0 Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.030506 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgwrl" event={"ID":"a7d897c9-3e74-4c11-8a32-7f810a7a6e83","Type":"ContainerDied","Data":"5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8"} Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.030716 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgwrl" event={"ID":"a7d897c9-3e74-4c11-8a32-7f810a7a6e83","Type":"ContainerStarted","Data":"1a4c2bc8128755aadc18356ea2447224463b5c97b286fc6d4766a157e5fa6b56"} Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.035478 4842 generic.go:334] "Generic (PLEG): container finished" podID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerID="78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109" exitCode=0 Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.035528 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-59wrh" event={"ID":"d77ceb67-639a-4ea4-ac7b-3d6055abf69e","Type":"ContainerDied","Data":"78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109"} Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.035553 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-59wrh" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.035565 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-59wrh" event={"ID":"d77ceb67-639a-4ea4-ac7b-3d6055abf69e","Type":"ContainerDied","Data":"dd5fe6b2610e820848d1c9792ee674729d51d58a151b0cfaa0e5e52fd5e81c13"} Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.035587 4842 scope.go:117] "RemoveContainer" containerID="78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.060502 4842 scope.go:117] "RemoveContainer" containerID="fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.077281 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-59wrh"] Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.084549 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-59wrh"] Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.099010 4842 scope.go:117] "RemoveContainer" containerID="2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.133325 4842 scope.go:117] "RemoveContainer" containerID="78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109" Nov 11 15:00:42 crc kubenswrapper[4842]: E1111 15:00:42.133781 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109\": container with ID starting with 78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109 not found: ID does not exist" containerID="78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.133886 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109"} err="failed to get container status \"78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109\": rpc error: code = NotFound desc = could not find container \"78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109\": container with ID starting with 78de0e3904e52010c21894014e4b48f8d94246c2bbcd6330d41c06927b4be109 not found: ID does not exist" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.133966 4842 scope.go:117] "RemoveContainer" containerID="fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b" Nov 11 15:00:42 crc kubenswrapper[4842]: E1111 15:00:42.134389 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b\": container with ID starting with fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b not found: ID does not exist" containerID="fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.134412 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b"} err="failed to get container status \"fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b\": rpc error: code = NotFound desc = could not find 
container \"fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b\": container with ID starting with fe5d9b2f1a0d795af4eb09fa36a5d9ca1652759f2d4f892f87fdaecbd5b8361b not found: ID does not exist" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.134426 4842 scope.go:117] "RemoveContainer" containerID="2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea" Nov 11 15:00:42 crc kubenswrapper[4842]: E1111 15:00:42.134822 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea\": container with ID starting with 2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea not found: ID does not exist" containerID="2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea" Nov 11 15:00:42 crc kubenswrapper[4842]: I1111 15:00:42.134900 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea"} err="failed to get container status \"2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea\": rpc error: code = NotFound desc = could not find container \"2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea\": container with ID starting with 2ce74e35fde7dbb618e3249d760a8e209a8e83c59053eae5b273fad2372198ea not found: ID does not exist" Nov 11 15:00:43 crc kubenswrapper[4842]: I1111 15:00:43.047609 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgwrl" event={"ID":"a7d897c9-3e74-4c11-8a32-7f810a7a6e83","Type":"ContainerStarted","Data":"88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0"} Nov 11 15:00:44 crc kubenswrapper[4842]: I1111 15:00:44.058540 4842 generic.go:334] "Generic (PLEG): container finished" podID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerID="88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0" exitCode=0 Nov 11 15:00:44 crc kubenswrapper[4842]: I1111 15:00:44.071471 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" path="/var/lib/kubelet/pods/d77ceb67-639a-4ea4-ac7b-3d6055abf69e/volumes" Nov 11 15:00:44 crc kubenswrapper[4842]: I1111 15:00:44.072132 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgwrl" event={"ID":"a7d897c9-3e74-4c11-8a32-7f810a7a6e83","Type":"ContainerDied","Data":"88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0"} Nov 11 15:00:45 crc kubenswrapper[4842]: I1111 15:00:45.089841 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgwrl" event={"ID":"a7d897c9-3e74-4c11-8a32-7f810a7a6e83","Type":"ContainerStarted","Data":"6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101"} Nov 11 15:00:45 crc kubenswrapper[4842]: I1111 15:00:45.120551 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qgwrl" podStartSLOduration=2.6303900970000003 podStartE2EDuration="5.120531308s" podCreationTimestamp="2025-11-11 15:00:40 +0000 UTC" firstStartedPulling="2025-11-11 15:00:42.032585317 +0000 UTC m=+5452.692874936" lastFinishedPulling="2025-11-11 15:00:44.522726488 +0000 UTC m=+5455.183016147" observedRunningTime="2025-11-11 15:00:45.112255697 +0000 UTC m=+5455.772545326" watchObservedRunningTime="2025-11-11 15:00:45.120531308 +0000 UTC 
m=+5455.780820927" Nov 11 15:00:47 crc kubenswrapper[4842]: I1111 15:00:47.059292 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:00:47 crc kubenswrapper[4842]: E1111 15:00:47.059926 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:00:51 crc kubenswrapper[4842]: I1111 15:00:51.022537 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:51 crc kubenswrapper[4842]: I1111 15:00:51.023057 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:51 crc kubenswrapper[4842]: I1111 15:00:51.078401 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:51 crc kubenswrapper[4842]: I1111 15:00:51.192687 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:51 crc kubenswrapper[4842]: I1111 15:00:51.310171 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgwrl"] Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.162471 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qgwrl" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="registry-server" containerID="cri-o://6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101" gracePeriod=2 Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.662915 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.764926 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgts8\" (UniqueName: \"kubernetes.io/projected/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-kube-api-access-kgts8\") pod \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.765115 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-utilities\") pod \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.765404 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-catalog-content\") pod \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\" (UID: \"a7d897c9-3e74-4c11-8a32-7f810a7a6e83\") " Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.768430 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-utilities" (OuterVolumeSpecName: "utilities") pod "a7d897c9-3e74-4c11-8a32-7f810a7a6e83" (UID: "a7d897c9-3e74-4c11-8a32-7f810a7a6e83"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.776377 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-kube-api-access-kgts8" (OuterVolumeSpecName: "kube-api-access-kgts8") pod "a7d897c9-3e74-4c11-8a32-7f810a7a6e83" (UID: "a7d897c9-3e74-4c11-8a32-7f810a7a6e83"). InnerVolumeSpecName "kube-api-access-kgts8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.783748 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7d897c9-3e74-4c11-8a32-7f810a7a6e83" (UID: "a7d897c9-3e74-4c11-8a32-7f810a7a6e83"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.868338 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.868380 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgts8\" (UniqueName: \"kubernetes.io/projected/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-kube-api-access-kgts8\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:53 crc kubenswrapper[4842]: I1111 15:00:53.868395 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d897c9-3e74-4c11-8a32-7f810a7a6e83-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.178899 4842 generic.go:334] "Generic (PLEG): container finished" podID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerID="6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101" exitCode=0 Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.178972 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgwrl" event={"ID":"a7d897c9-3e74-4c11-8a32-7f810a7a6e83","Type":"ContainerDied","Data":"6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101"} Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.179011 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qgwrl" event={"ID":"a7d897c9-3e74-4c11-8a32-7f810a7a6e83","Type":"ContainerDied","Data":"1a4c2bc8128755aadc18356ea2447224463b5c97b286fc6d4766a157e5fa6b56"} Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.179034 4842 scope.go:117] "RemoveContainer" containerID="6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.179031 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qgwrl" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.212873 4842 scope.go:117] "RemoveContainer" containerID="88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.213801 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgwrl"] Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.225663 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qgwrl"] Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.246009 4842 scope.go:117] "RemoveContainer" containerID="5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.287853 4842 scope.go:117] "RemoveContainer" containerID="6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101" Nov 11 15:00:54 crc kubenswrapper[4842]: E1111 15:00:54.288575 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101\": container with ID starting with 6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101 not found: ID does not exist" containerID="6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.288621 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101"} err="failed to get container status \"6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101\": rpc error: code = NotFound desc = could not find container \"6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101\": container with ID starting with 6732b61a49b19475ef2732b6cc30d14589f5bd58b18f874ee317075bffa42101 not found: ID does not exist" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.288649 4842 scope.go:117] "RemoveContainer" containerID="88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0" Nov 11 15:00:54 crc kubenswrapper[4842]: E1111 15:00:54.289203 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0\": container with ID starting with 88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0 not found: ID does not exist" containerID="88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.289242 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0"} err="failed to get container status \"88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0\": rpc error: code = NotFound desc = could not find container \"88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0\": container with ID starting with 88a5f73a258931012f43dc580f73c9b416be6a6bd98fe40ac3917042d90660a0 not found: ID does not exist" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.289261 4842 scope.go:117] "RemoveContainer" containerID="5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8" Nov 11 15:00:54 crc kubenswrapper[4842]: E1111 15:00:54.289624 4842 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8\": container with ID starting with 5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8 not found: ID does not exist" containerID="5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8" Nov 11 15:00:54 crc kubenswrapper[4842]: I1111 15:00:54.289696 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8"} err="failed to get container status \"5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8\": rpc error: code = NotFound desc = could not find container \"5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8\": container with ID starting with 5d88b2c6d5906486237e6065cd2f99e4f29179f4531a31a5831088095d88eff8 not found: ID does not exist" Nov 11 15:00:56 crc kubenswrapper[4842]: I1111 15:00:56.070840 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" path="/var/lib/kubelet/pods/a7d897c9-3e74-4c11-8a32-7f810a7a6e83/volumes" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.075815 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:01:00 crc kubenswrapper[4842]: E1111 15:01:00.076940 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149203 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29381221-6dgbk"] Nov 11 15:01:00 crc kubenswrapper[4842]: E1111 15:01:00.149604 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="registry-server" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149622 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="registry-server" Nov 11 15:01:00 crc kubenswrapper[4842]: E1111 15:01:00.149645 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="extract-content" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149653 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="extract-content" Nov 11 15:01:00 crc kubenswrapper[4842]: E1111 15:01:00.149669 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="registry-server" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149676 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="registry-server" Nov 11 15:01:00 crc kubenswrapper[4842]: E1111 15:01:00.149696 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="extract-utilities" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149703 4842 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="extract-utilities" Nov 11 15:01:00 crc kubenswrapper[4842]: E1111 15:01:00.149717 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="extract-utilities" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149722 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="extract-utilities" Nov 11 15:01:00 crc kubenswrapper[4842]: E1111 15:01:00.149739 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="extract-content" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149744 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="extract-content" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149934 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="d77ceb67-639a-4ea4-ac7b-3d6055abf69e" containerName="registry-server" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.149956 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d897c9-3e74-4c11-8a32-7f810a7a6e83" containerName="registry-server" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.150634 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.165736 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29381221-6dgbk"] Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.198427 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-fernet-keys\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.198476 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-combined-ca-bundle\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.198498 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-config-data\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.199052 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mc5h\" (UniqueName: \"kubernetes.io/projected/5b5757ad-60d5-4159-a919-b2b784bd5072-kube-api-access-7mc5h\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.303720 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mc5h\" (UniqueName: \"kubernetes.io/projected/5b5757ad-60d5-4159-a919-b2b784bd5072-kube-api-access-7mc5h\") pod 
\"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.304316 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-fernet-keys\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.305376 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-combined-ca-bundle\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.305489 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-config-data\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.313270 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-fernet-keys\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.313416 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-combined-ca-bundle\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.313619 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-config-data\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.323555 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mc5h\" (UniqueName: \"kubernetes.io/projected/5b5757ad-60d5-4159-a919-b2b784bd5072-kube-api-access-7mc5h\") pod \"keystone-cron-29381221-6dgbk\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.479801 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:00 crc kubenswrapper[4842]: I1111 15:01:00.945441 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29381221-6dgbk"] Nov 11 15:01:01 crc kubenswrapper[4842]: I1111 15:01:01.249388 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381221-6dgbk" event={"ID":"5b5757ad-60d5-4159-a919-b2b784bd5072","Type":"ContainerStarted","Data":"6a583c0740f2e48fa301e22bb624a315dcf8d952c6012dad3636ef69920f2c98"} Nov 11 15:01:01 crc kubenswrapper[4842]: I1111 15:01:01.249725 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381221-6dgbk" event={"ID":"5b5757ad-60d5-4159-a919-b2b784bd5072","Type":"ContainerStarted","Data":"1ed6143043e5d881d1d170ca4f48e6df57aaf0f91502bd0d5b3d639da8ba1e30"} Nov 11 15:01:01 crc kubenswrapper[4842]: I1111 15:01:01.269828 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29381221-6dgbk" podStartSLOduration=1.269810881 podStartE2EDuration="1.269810881s" podCreationTimestamp="2025-11-11 15:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 15:01:01.264631998 +0000 UTC m=+5471.924921637" watchObservedRunningTime="2025-11-11 15:01:01.269810881 +0000 UTC m=+5471.930100500" Nov 11 15:01:02 crc kubenswrapper[4842]: I1111 15:01:02.191535 4842 scope.go:117] "RemoveContainer" containerID="a616b45a044341f2dd5a85cc93c5a6587b6428a6b295b8d264680ba37a4605b7" Nov 11 15:01:05 crc kubenswrapper[4842]: I1111 15:01:05.292130 4842 generic.go:334] "Generic (PLEG): container finished" podID="5b5757ad-60d5-4159-a919-b2b784bd5072" containerID="6a583c0740f2e48fa301e22bb624a315dcf8d952c6012dad3636ef69920f2c98" exitCode=0 Nov 11 15:01:05 crc kubenswrapper[4842]: I1111 15:01:05.292186 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381221-6dgbk" event={"ID":"5b5757ad-60d5-4159-a919-b2b784bd5072","Type":"ContainerDied","Data":"6a583c0740f2e48fa301e22bb624a315dcf8d952c6012dad3636ef69920f2c98"} Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.656615 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.756582 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-config-data\") pod \"5b5757ad-60d5-4159-a919-b2b784bd5072\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.756674 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mc5h\" (UniqueName: \"kubernetes.io/projected/5b5757ad-60d5-4159-a919-b2b784bd5072-kube-api-access-7mc5h\") pod \"5b5757ad-60d5-4159-a919-b2b784bd5072\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.756730 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-combined-ca-bundle\") pod \"5b5757ad-60d5-4159-a919-b2b784bd5072\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.756853 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-fernet-keys\") pod \"5b5757ad-60d5-4159-a919-b2b784bd5072\" (UID: \"5b5757ad-60d5-4159-a919-b2b784bd5072\") " Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.762262 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5b5757ad-60d5-4159-a919-b2b784bd5072" (UID: "5b5757ad-60d5-4159-a919-b2b784bd5072"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.762960 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b5757ad-60d5-4159-a919-b2b784bd5072-kube-api-access-7mc5h" (OuterVolumeSpecName: "kube-api-access-7mc5h") pod "5b5757ad-60d5-4159-a919-b2b784bd5072" (UID: "5b5757ad-60d5-4159-a919-b2b784bd5072"). InnerVolumeSpecName "kube-api-access-7mc5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.786639 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b5757ad-60d5-4159-a919-b2b784bd5072" (UID: "5b5757ad-60d5-4159-a919-b2b784bd5072"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.814242 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-config-data" (OuterVolumeSpecName: "config-data") pod "5b5757ad-60d5-4159-a919-b2b784bd5072" (UID: "5b5757ad-60d5-4159-a919-b2b784bd5072"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.859820 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.859856 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mc5h\" (UniqueName: \"kubernetes.io/projected/5b5757ad-60d5-4159-a919-b2b784bd5072-kube-api-access-7mc5h\") on node \"crc\" DevicePath \"\"" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.859867 4842 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 11 15:01:06 crc kubenswrapper[4842]: I1111 15:01:06.859876 4842 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5b5757ad-60d5-4159-a919-b2b784bd5072-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 11 15:01:07 crc kubenswrapper[4842]: I1111 15:01:07.311931 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29381221-6dgbk" event={"ID":"5b5757ad-60d5-4159-a919-b2b784bd5072","Type":"ContainerDied","Data":"1ed6143043e5d881d1d170ca4f48e6df57aaf0f91502bd0d5b3d639da8ba1e30"} Nov 11 15:01:07 crc kubenswrapper[4842]: I1111 15:01:07.311971 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ed6143043e5d881d1d170ca4f48e6df57aaf0f91502bd0d5b3d639da8ba1e30" Nov 11 15:01:07 crc kubenswrapper[4842]: I1111 15:01:07.311979 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29381221-6dgbk" Nov 11 15:01:13 crc kubenswrapper[4842]: I1111 15:01:13.059031 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:01:13 crc kubenswrapper[4842]: E1111 15:01:13.059587 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:01:26 crc kubenswrapper[4842]: I1111 15:01:26.060423 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:01:26 crc kubenswrapper[4842]: E1111 15:01:26.061311 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:01:40 crc kubenswrapper[4842]: I1111 15:01:40.073818 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:01:40 crc kubenswrapper[4842]: E1111 15:01:40.074876 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:01:52 crc kubenswrapper[4842]: I1111 15:01:52.059843 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:01:52 crc kubenswrapper[4842]: E1111 15:01:52.061586 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:02:04 crc kubenswrapper[4842]: I1111 15:02:04.059353 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:02:04 crc kubenswrapper[4842]: E1111 15:02:04.060344 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:02:18 crc kubenswrapper[4842]: I1111 15:02:18.068352 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:02:18 crc kubenswrapper[4842]: E1111 15:02:18.069108 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:02:31 crc kubenswrapper[4842]: I1111 15:02:31.059822 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:02:31 crc kubenswrapper[4842]: E1111 15:02:31.061226 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:02:45 crc kubenswrapper[4842]: I1111 15:02:45.059704 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:02:45 crc kubenswrapper[4842]: I1111 15:02:45.302477 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"868a3f2f3497955b368e456ab0f14696840fd159648bafcf039bcce3bcc3c0ab"} Nov 11 15:05:14 crc 
kubenswrapper[4842]: I1111 15:05:14.960888 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:05:14 crc kubenswrapper[4842]: I1111 15:05:14.961650 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:05:44 crc kubenswrapper[4842]: I1111 15:05:44.960904 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:05:44 crc kubenswrapper[4842]: I1111 15:05:44.961534 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:06:13 crc kubenswrapper[4842]: I1111 15:06:13.372843 4842 generic.go:334] "Generic (PLEG): container finished" podID="42e4762f-5636-4ea5-914b-142ccc708e6d" containerID="1b49f23b7a1bdbf364349be3fcb6f14dd7429b6aabfab807669c9b5edb1a5f84" exitCode=0 Nov 11 15:06:13 crc kubenswrapper[4842]: I1111 15:06:13.372954 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"42e4762f-5636-4ea5-914b-142ccc708e6d","Type":"ContainerDied","Data":"1b49f23b7a1bdbf364349be3fcb6f14dd7429b6aabfab807669c9b5edb1a5f84"} Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.824206 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.857705 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-config-data\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.857866 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-temporary\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.858064 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bg25\" (UniqueName: \"kubernetes.io/projected/42e4762f-5636-4ea5-914b-142ccc708e6d-kube-api-access-4bg25\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.858451 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.858574 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-workdir\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.858578 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.858617 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ca-certs\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.858758 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ssh-key\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.859199 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-config-data" (OuterVolumeSpecName: "config-data") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.860082 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config-secret\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.860244 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"42e4762f-5636-4ea5-914b-142ccc708e6d\" (UID: \"42e4762f-5636-4ea5-914b-142ccc708e6d\") " Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.862636 4842 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.862744 4842 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-config-data\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.865703 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.878685 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42e4762f-5636-4ea5-914b-142ccc708e6d-kube-api-access-4bg25" (OuterVolumeSpecName: "kube-api-access-4bg25") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "kube-api-access-4bg25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.891484 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "test-operator-logs") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.908038 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.914996 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.923520 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.931974 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "42e4762f-5636-4ea5-914b-142ccc708e6d" (UID: "42e4762f-5636-4ea5-914b-142ccc708e6d"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.960837 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.960898 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.960940 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.961714 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"868a3f2f3497955b368e456ab0f14696840fd159648bafcf039bcce3bcc3c0ab"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.961780 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://868a3f2f3497955b368e456ab0f14696840fd159648bafcf039bcce3bcc3c0ab" gracePeriod=600 Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.965973 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bg25\" (UniqueName: \"kubernetes.io/projected/42e4762f-5636-4ea5-914b-142ccc708e6d-kube-api-access-4bg25\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.966023 4842 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.966035 4842 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/42e4762f-5636-4ea5-914b-142ccc708e6d-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath 
\"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.966055 4842 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.966070 4842 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.966086 4842 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/42e4762f-5636-4ea5-914b-142ccc708e6d-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:14 crc kubenswrapper[4842]: I1111 15:06:14.966165 4842 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.008696 4842 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.068565 4842 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.391570 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"42e4762f-5636-4ea5-914b-142ccc708e6d","Type":"ContainerDied","Data":"1c5ceae2b0a02da90d37137501a505192a38dd3e16745cd8f93da2581da2bc59"} Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.392041 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c5ceae2b0a02da90d37137501a505192a38dd3e16745cd8f93da2581da2bc59" Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.391611 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.394875 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="868a3f2f3497955b368e456ab0f14696840fd159648bafcf039bcce3bcc3c0ab" exitCode=0 Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.394921 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"868a3f2f3497955b368e456ab0f14696840fd159648bafcf039bcce3bcc3c0ab"} Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.394947 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c"} Nov 11 15:06:15 crc kubenswrapper[4842]: I1111 15:06:15.394967 4842 scope.go:117] "RemoveContainer" containerID="7b7ff1946662871239b39aea2f7abeb5557f18d3c7b5bd414f65e2599fe7800c" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.665660 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 11 15:06:17 crc kubenswrapper[4842]: E1111 15:06:17.666613 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b5757ad-60d5-4159-a919-b2b784bd5072" containerName="keystone-cron" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.666625 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b5757ad-60d5-4159-a919-b2b784bd5072" containerName="keystone-cron" Nov 11 15:06:17 crc kubenswrapper[4842]: E1111 15:06:17.666666 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42e4762f-5636-4ea5-914b-142ccc708e6d" containerName="tempest-tests-tempest-tests-runner" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.666673 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="42e4762f-5636-4ea5-914b-142ccc708e6d" containerName="tempest-tests-tempest-tests-runner" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.666865 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="42e4762f-5636-4ea5-914b-142ccc708e6d" containerName="tempest-tests-tempest-tests-runner" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.666881 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b5757ad-60d5-4159-a919-b2b784bd5072" containerName="keystone-cron" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.667644 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.671044 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tmr8c" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.679927 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.817685 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9103e703-2527-4102-948b-a6f7e05b2e5a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.817836 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rstdz\" (UniqueName: \"kubernetes.io/projected/9103e703-2527-4102-948b-a6f7e05b2e5a-kube-api-access-rstdz\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9103e703-2527-4102-948b-a6f7e05b2e5a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.919552 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9103e703-2527-4102-948b-a6f7e05b2e5a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.919755 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rstdz\" (UniqueName: \"kubernetes.io/projected/9103e703-2527-4102-948b-a6f7e05b2e5a-kube-api-access-rstdz\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9103e703-2527-4102-948b-a6f7e05b2e5a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.920269 4842 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9103e703-2527-4102-948b-a6f7e05b2e5a\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.943176 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rstdz\" (UniqueName: \"kubernetes.io/projected/9103e703-2527-4102-948b-a6f7e05b2e5a-kube-api-access-rstdz\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9103e703-2527-4102-948b-a6f7e05b2e5a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc kubenswrapper[4842]: I1111 15:06:17.969259 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9103e703-2527-4102-948b-a6f7e05b2e5a\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:17 crc 
kubenswrapper[4842]: I1111 15:06:17.993630 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 11 15:06:18 crc kubenswrapper[4842]: I1111 15:06:18.512207 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 11 15:06:18 crc kubenswrapper[4842]: W1111 15:06:18.518232 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9103e703_2527_4102_948b_a6f7e05b2e5a.slice/crio-a85bf232fbe6c36101138c1d566fb27549553b81ef2a6df41ac11a1f46766039 WatchSource:0}: Error finding container a85bf232fbe6c36101138c1d566fb27549553b81ef2a6df41ac11a1f46766039: Status 404 returned error can't find the container with id a85bf232fbe6c36101138c1d566fb27549553b81ef2a6df41ac11a1f46766039 Nov 11 15:06:18 crc kubenswrapper[4842]: I1111 15:06:18.521819 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 15:06:19 crc kubenswrapper[4842]: I1111 15:06:19.436224 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"9103e703-2527-4102-948b-a6f7e05b2e5a","Type":"ContainerStarted","Data":"a85bf232fbe6c36101138c1d566fb27549553b81ef2a6df41ac11a1f46766039"} Nov 11 15:06:20 crc kubenswrapper[4842]: I1111 15:06:20.450523 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"9103e703-2527-4102-948b-a6f7e05b2e5a","Type":"ContainerStarted","Data":"6c20c3bb058544a947943b71c9d74384c131775309dceb9bacec11abe4c305bd"} Nov 11 15:06:20 crc kubenswrapper[4842]: I1111 15:06:20.472775 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.307216981 podStartE2EDuration="3.472752339s" podCreationTimestamp="2025-11-11 15:06:17 +0000 UTC" firstStartedPulling="2025-11-11 15:06:18.521629887 +0000 UTC m=+5789.181919506" lastFinishedPulling="2025-11-11 15:06:19.687165245 +0000 UTC m=+5790.347454864" observedRunningTime="2025-11-11 15:06:20.464082468 +0000 UTC m=+5791.124372107" watchObservedRunningTime="2025-11-11 15:06:20.472752339 +0000 UTC m=+5791.133041958" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.590039 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vq65q"] Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.593441 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.605864 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vq65q"] Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.664056 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-utilities\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.664211 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8rtk\" (UniqueName: \"kubernetes.io/projected/1df3b9a3-af2e-4d0c-8063-712263a3dae0-kube-api-access-j8rtk\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.664264 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-catalog-content\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.766025 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-utilities\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.766117 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8rtk\" (UniqueName: \"kubernetes.io/projected/1df3b9a3-af2e-4d0c-8063-712263a3dae0-kube-api-access-j8rtk\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.766156 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-catalog-content\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.766599 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-utilities\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.766655 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-catalog-content\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.787247 4842 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-j8rtk\" (UniqueName: \"kubernetes.io/projected/1df3b9a3-af2e-4d0c-8063-712263a3dae0-kube-api-access-j8rtk\") pod \"community-operators-vq65q\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:20 crc kubenswrapper[4842]: I1111 15:07:20.975611 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:21 crc kubenswrapper[4842]: I1111 15:07:21.496503 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vq65q"] Nov 11 15:07:22 crc kubenswrapper[4842]: I1111 15:07:22.093036 4842 generic.go:334] "Generic (PLEG): container finished" podID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerID="e8c269bf3c467e00e74714eefd81644cb17fc5e4d9c505e90d2c81fa42d446ea" exitCode=0 Nov 11 15:07:22 crc kubenswrapper[4842]: I1111 15:07:22.093110 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vq65q" event={"ID":"1df3b9a3-af2e-4d0c-8063-712263a3dae0","Type":"ContainerDied","Data":"e8c269bf3c467e00e74714eefd81644cb17fc5e4d9c505e90d2c81fa42d446ea"} Nov 11 15:07:22 crc kubenswrapper[4842]: I1111 15:07:22.093439 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vq65q" event={"ID":"1df3b9a3-af2e-4d0c-8063-712263a3dae0","Type":"ContainerStarted","Data":"833a99c4c6aff51be359e57a4167e63a60050db41983bd1c11e8d4470d4403a8"} Nov 11 15:07:23 crc kubenswrapper[4842]: I1111 15:07:23.109339 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vq65q" event={"ID":"1df3b9a3-af2e-4d0c-8063-712263a3dae0","Type":"ContainerStarted","Data":"2c1e7b4d4c4819f8e6a9b9daf9ad817b64ec88c2e338a7ee548f4bd71ecc5ecb"} Nov 11 15:07:24 crc kubenswrapper[4842]: I1111 15:07:24.126926 4842 generic.go:334] "Generic (PLEG): container finished" podID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerID="2c1e7b4d4c4819f8e6a9b9daf9ad817b64ec88c2e338a7ee548f4bd71ecc5ecb" exitCode=0 Nov 11 15:07:24 crc kubenswrapper[4842]: I1111 15:07:24.127044 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vq65q" event={"ID":"1df3b9a3-af2e-4d0c-8063-712263a3dae0","Type":"ContainerDied","Data":"2c1e7b4d4c4819f8e6a9b9daf9ad817b64ec88c2e338a7ee548f4bd71ecc5ecb"} Nov 11 15:07:25 crc kubenswrapper[4842]: I1111 15:07:25.137120 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vq65q" event={"ID":"1df3b9a3-af2e-4d0c-8063-712263a3dae0","Type":"ContainerStarted","Data":"95392ef19ac59f479c4c36ad67f56c9a95637bc82cbf5b6fd9485c688aa48caa"} Nov 11 15:07:25 crc kubenswrapper[4842]: I1111 15:07:25.152879 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vq65q" podStartSLOduration=2.666659567 podStartE2EDuration="5.152861116s" podCreationTimestamp="2025-11-11 15:07:20 +0000 UTC" firstStartedPulling="2025-11-11 15:07:22.095323432 +0000 UTC m=+5852.755613051" lastFinishedPulling="2025-11-11 15:07:24.581524981 +0000 UTC m=+5855.241814600" observedRunningTime="2025-11-11 15:07:25.150982717 +0000 UTC m=+5855.811272346" watchObservedRunningTime="2025-11-11 15:07:25.152861116 +0000 UTC m=+5855.813150735" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.669485 4842 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-must-gather-hhh5t/must-gather-z76bs"] Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.683013 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.691867 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-hhh5t"/"kube-root-ca.crt" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.692228 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-hhh5t"/"openshift-service-ca.crt" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.703812 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-hhh5t"/"default-dockercfg-v4rsr" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.716894 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-hhh5t/must-gather-z76bs"] Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.855836 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-must-gather-output\") pod \"must-gather-z76bs\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.855922 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwv9c\" (UniqueName: \"kubernetes.io/projected/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-kube-api-access-jwv9c\") pod \"must-gather-z76bs\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.958087 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-must-gather-output\") pod \"must-gather-z76bs\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.958261 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwv9c\" (UniqueName: \"kubernetes.io/projected/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-kube-api-access-jwv9c\") pod \"must-gather-z76bs\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:26 crc kubenswrapper[4842]: I1111 15:07:26.959642 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-must-gather-output\") pod \"must-gather-z76bs\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:27 crc kubenswrapper[4842]: I1111 15:07:27.000130 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwv9c\" (UniqueName: \"kubernetes.io/projected/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-kube-api-access-jwv9c\") pod \"must-gather-z76bs\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:27 crc kubenswrapper[4842]: I1111 15:07:27.040591 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:07:27 crc kubenswrapper[4842]: I1111 15:07:27.621135 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-hhh5t/must-gather-z76bs"] Nov 11 15:07:28 crc kubenswrapper[4842]: I1111 15:07:28.181732 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/must-gather-z76bs" event={"ID":"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070","Type":"ContainerStarted","Data":"c8331818ff1649f48d4371b1bdf074aa0eaa41aa1fb7f8d77016976acf70e488"} Nov 11 15:07:30 crc kubenswrapper[4842]: I1111 15:07:30.976346 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:30 crc kubenswrapper[4842]: I1111 15:07:30.976704 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:31 crc kubenswrapper[4842]: I1111 15:07:31.061810 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:31 crc kubenswrapper[4842]: I1111 15:07:31.269950 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:31 crc kubenswrapper[4842]: I1111 15:07:31.329917 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vq65q"] Nov 11 15:07:33 crc kubenswrapper[4842]: I1111 15:07:33.230490 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vq65q" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="registry-server" containerID="cri-o://95392ef19ac59f479c4c36ad67f56c9a95637bc82cbf5b6fd9485c688aa48caa" gracePeriod=2 Nov 11 15:07:34 crc kubenswrapper[4842]: I1111 15:07:34.243820 4842 generic.go:334] "Generic (PLEG): container finished" podID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerID="95392ef19ac59f479c4c36ad67f56c9a95637bc82cbf5b6fd9485c688aa48caa" exitCode=0 Nov 11 15:07:34 crc kubenswrapper[4842]: I1111 15:07:34.243889 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vq65q" event={"ID":"1df3b9a3-af2e-4d0c-8063-712263a3dae0","Type":"ContainerDied","Data":"95392ef19ac59f479c4c36ad67f56c9a95637bc82cbf5b6fd9485c688aa48caa"} Nov 11 15:07:39 crc kubenswrapper[4842]: I1111 15:07:39.974314 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:39 crc kubenswrapper[4842]: I1111 15:07:39.979342 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-catalog-content\") pod \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " Nov 11 15:07:39 crc kubenswrapper[4842]: I1111 15:07:39.979541 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-utilities\") pod \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " Nov 11 15:07:39 crc kubenswrapper[4842]: I1111 15:07:39.979679 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8rtk\" (UniqueName: \"kubernetes.io/projected/1df3b9a3-af2e-4d0c-8063-712263a3dae0-kube-api-access-j8rtk\") pod \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\" (UID: \"1df3b9a3-af2e-4d0c-8063-712263a3dae0\") " Nov 11 15:07:39 crc kubenswrapper[4842]: I1111 15:07:39.981782 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-utilities" (OuterVolumeSpecName: "utilities") pod "1df3b9a3-af2e-4d0c-8063-712263a3dae0" (UID: "1df3b9a3-af2e-4d0c-8063-712263a3dae0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:07:39 crc kubenswrapper[4842]: I1111 15:07:39.990265 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1df3b9a3-af2e-4d0c-8063-712263a3dae0-kube-api-access-j8rtk" (OuterVolumeSpecName: "kube-api-access-j8rtk") pod "1df3b9a3-af2e-4d0c-8063-712263a3dae0" (UID: "1df3b9a3-af2e-4d0c-8063-712263a3dae0"). InnerVolumeSpecName "kube-api-access-j8rtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.034863 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1df3b9a3-af2e-4d0c-8063-712263a3dae0" (UID: "1df3b9a3-af2e-4d0c-8063-712263a3dae0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.088958 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.089847 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1df3b9a3-af2e-4d0c-8063-712263a3dae0-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.090620 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8rtk\" (UniqueName: \"kubernetes.io/projected/1df3b9a3-af2e-4d0c-8063-712263a3dae0-kube-api-access-j8rtk\") on node \"crc\" DevicePath \"\"" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.337161 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/must-gather-z76bs" event={"ID":"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070","Type":"ContainerStarted","Data":"647de456b4edaff633309a50709a3b2fc8be3018b0d4e7489d8014408c8c09a9"} Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.339606 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vq65q" event={"ID":"1df3b9a3-af2e-4d0c-8063-712263a3dae0","Type":"ContainerDied","Data":"833a99c4c6aff51be359e57a4167e63a60050db41983bd1c11e8d4470d4403a8"} Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.339635 4842 scope.go:117] "RemoveContainer" containerID="95392ef19ac59f479c4c36ad67f56c9a95637bc82cbf5b6fd9485c688aa48caa" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.339764 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vq65q" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.378078 4842 scope.go:117] "RemoveContainer" containerID="2c1e7b4d4c4819f8e6a9b9daf9ad817b64ec88c2e338a7ee548f4bd71ecc5ecb" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.411459 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vq65q"] Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.412613 4842 scope.go:117] "RemoveContainer" containerID="e8c269bf3c467e00e74714eefd81644cb17fc5e4d9c505e90d2c81fa42d446ea" Nov 11 15:07:40 crc kubenswrapper[4842]: I1111 15:07:40.419505 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vq65q"] Nov 11 15:07:41 crc kubenswrapper[4842]: I1111 15:07:41.351701 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/must-gather-z76bs" event={"ID":"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070","Type":"ContainerStarted","Data":"e38056bce75c40807c20eaa7d1392ed3f11d6d0cb2a68d67be40723f4b424e3f"} Nov 11 15:07:41 crc kubenswrapper[4842]: I1111 15:07:41.379036 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-hhh5t/must-gather-z76bs" podStartSLOduration=3.149152364 podStartE2EDuration="15.379009888s" podCreationTimestamp="2025-11-11 15:07:26 +0000 UTC" firstStartedPulling="2025-11-11 15:07:27.637815765 +0000 UTC m=+5858.298105384" lastFinishedPulling="2025-11-11 15:07:39.867673289 +0000 UTC m=+5870.527962908" observedRunningTime="2025-11-11 15:07:41.37365072 +0000 UTC m=+5872.033940339" watchObservedRunningTime="2025-11-11 15:07:41.379009888 +0000 UTC m=+5872.039299517" Nov 11 15:07:42 crc kubenswrapper[4842]: I1111 15:07:42.074514 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" path="/var/lib/kubelet/pods/1df3b9a3-af2e-4d0c-8063-712263a3dae0/volumes" Nov 11 15:07:42 crc kubenswrapper[4842]: E1111 15:07:42.905084 4842 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.155:36488->38.102.83.155:44429: write tcp 38.102.83.155:36488->38.102.83.155:44429: write: broken pipe Nov 11 15:07:42 crc kubenswrapper[4842]: E1111 15:07:42.932264 4842 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.155:36514->38.102.83.155:44429: write tcp 38.102.83.155:36514->38.102.83.155:44429: write: broken pipe Nov 11 15:07:43 crc kubenswrapper[4842]: E1111 15:07:43.323498 4842 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.155:36584->38.102.83.155:44429: write tcp 38.102.83.155:36584->38.102.83.155:44429: write: broken pipe Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.137086 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-vprdz"] Nov 11 15:07:44 crc kubenswrapper[4842]: E1111 15:07:44.138582 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="registry-server" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.138814 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="registry-server" Nov 11 15:07:44 crc kubenswrapper[4842]: E1111 15:07:44.138844 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="extract-content" Nov 11 15:07:44 crc 
kubenswrapper[4842]: I1111 15:07:44.138853 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="extract-content" Nov 11 15:07:44 crc kubenswrapper[4842]: E1111 15:07:44.138882 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="extract-utilities" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.138890 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="extract-utilities" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.139194 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="1df3b9a3-af2e-4d0c-8063-712263a3dae0" containerName="registry-server" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.140023 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.188027 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c592a159-5dfe-4bfc-a49a-234284cf80c0-host\") pod \"crc-debug-vprdz\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.188254 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4wk7\" (UniqueName: \"kubernetes.io/projected/c592a159-5dfe-4bfc-a49a-234284cf80c0-kube-api-access-h4wk7\") pod \"crc-debug-vprdz\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.289845 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4wk7\" (UniqueName: \"kubernetes.io/projected/c592a159-5dfe-4bfc-a49a-234284cf80c0-kube-api-access-h4wk7\") pod \"crc-debug-vprdz\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.289973 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c592a159-5dfe-4bfc-a49a-234284cf80c0-host\") pod \"crc-debug-vprdz\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.290091 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c592a159-5dfe-4bfc-a49a-234284cf80c0-host\") pod \"crc-debug-vprdz\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.313171 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4wk7\" (UniqueName: \"kubernetes.io/projected/c592a159-5dfe-4bfc-a49a-234284cf80c0-kube-api-access-h4wk7\") pod \"crc-debug-vprdz\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: I1111 15:07:44.464059 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:07:44 crc kubenswrapper[4842]: W1111 15:07:44.500809 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc592a159_5dfe_4bfc_a49a_234284cf80c0.slice/crio-78e2fdda9d6ee93e72e1441dcf442765ed293c6160c3ad7c21bee9206fd837df WatchSource:0}: Error finding container 78e2fdda9d6ee93e72e1441dcf442765ed293c6160c3ad7c21bee9206fd837df: Status 404 returned error can't find the container with id 78e2fdda9d6ee93e72e1441dcf442765ed293c6160c3ad7c21bee9206fd837df Nov 11 15:07:45 crc kubenswrapper[4842]: I1111 15:07:45.393246 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" event={"ID":"c592a159-5dfe-4bfc-a49a-234284cf80c0","Type":"ContainerStarted","Data":"78e2fdda9d6ee93e72e1441dcf442765ed293c6160c3ad7c21bee9206fd837df"} Nov 11 15:07:55 crc kubenswrapper[4842]: I1111 15:07:55.490051 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" event={"ID":"c592a159-5dfe-4bfc-a49a-234284cf80c0","Type":"ContainerStarted","Data":"e4514f89f3ea7224b7432b8514fb810628fa512a8e6fb7865acb3b84f5630ae9"} Nov 11 15:07:55 crc kubenswrapper[4842]: I1111 15:07:55.510734 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" podStartSLOduration=1.762260621 podStartE2EDuration="11.510715516s" podCreationTimestamp="2025-11-11 15:07:44 +0000 UTC" firstStartedPulling="2025-11-11 15:07:44.502944747 +0000 UTC m=+5875.163234366" lastFinishedPulling="2025-11-11 15:07:54.251399632 +0000 UTC m=+5884.911689261" observedRunningTime="2025-11-11 15:07:55.506522634 +0000 UTC m=+5886.166812253" watchObservedRunningTime="2025-11-11 15:07:55.510715516 +0000 UTC m=+5886.171005135" Nov 11 15:08:38 crc kubenswrapper[4842]: I1111 15:08:38.886003 4842 generic.go:334] "Generic (PLEG): container finished" podID="c592a159-5dfe-4bfc-a49a-234284cf80c0" containerID="e4514f89f3ea7224b7432b8514fb810628fa512a8e6fb7865acb3b84f5630ae9" exitCode=0 Nov 11 15:08:38 crc kubenswrapper[4842]: I1111 15:08:38.886221 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" event={"ID":"c592a159-5dfe-4bfc-a49a-234284cf80c0","Type":"ContainerDied","Data":"e4514f89f3ea7224b7432b8514fb810628fa512a8e6fb7865acb3b84f5630ae9"} Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.117222 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dl98h"] Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.120201 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.129800 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dl98h"] Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.268869 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xk5d6\" (UniqueName: \"kubernetes.io/projected/bd1086ad-2366-4e8c-a739-96e80fa62a48-kube-api-access-xk5d6\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.269195 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-utilities\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.269324 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-catalog-content\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.370848 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xk5d6\" (UniqueName: \"kubernetes.io/projected/bd1086ad-2366-4e8c-a739-96e80fa62a48-kube-api-access-xk5d6\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.371188 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-utilities\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.371298 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-catalog-content\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.371642 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-utilities\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.371691 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-catalog-content\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.393741 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xk5d6\" (UniqueName: \"kubernetes.io/projected/bd1086ad-2366-4e8c-a739-96e80fa62a48-kube-api-access-xk5d6\") pod \"redhat-operators-dl98h\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.444463 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:39 crc kubenswrapper[4842]: I1111 15:08:39.939904 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dl98h"] Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.011352 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.042233 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-vprdz"] Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.050675 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-vprdz"] Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.192458 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c592a159-5dfe-4bfc-a49a-234284cf80c0-host\") pod \"c592a159-5dfe-4bfc-a49a-234284cf80c0\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.192552 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4wk7\" (UniqueName: \"kubernetes.io/projected/c592a159-5dfe-4bfc-a49a-234284cf80c0-kube-api-access-h4wk7\") pod \"c592a159-5dfe-4bfc-a49a-234284cf80c0\" (UID: \"c592a159-5dfe-4bfc-a49a-234284cf80c0\") " Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.192582 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c592a159-5dfe-4bfc-a49a-234284cf80c0-host" (OuterVolumeSpecName: "host") pod "c592a159-5dfe-4bfc-a49a-234284cf80c0" (UID: "c592a159-5dfe-4bfc-a49a-234284cf80c0"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.195006 4842 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c592a159-5dfe-4bfc-a49a-234284cf80c0-host\") on node \"crc\" DevicePath \"\"" Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.197641 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c592a159-5dfe-4bfc-a49a-234284cf80c0-kube-api-access-h4wk7" (OuterVolumeSpecName: "kube-api-access-h4wk7") pod "c592a159-5dfe-4bfc-a49a-234284cf80c0" (UID: "c592a159-5dfe-4bfc-a49a-234284cf80c0"). InnerVolumeSpecName "kube-api-access-h4wk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.296546 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4wk7\" (UniqueName: \"kubernetes.io/projected/c592a159-5dfe-4bfc-a49a-234284cf80c0-kube-api-access-h4wk7\") on node \"crc\" DevicePath \"\"" Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.903266 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-vprdz" Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.903283 4842 scope.go:117] "RemoveContainer" containerID="e4514f89f3ea7224b7432b8514fb810628fa512a8e6fb7865acb3b84f5630ae9" Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.905308 4842 generic.go:334] "Generic (PLEG): container finished" podID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerID="9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65" exitCode=0 Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.905361 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dl98h" event={"ID":"bd1086ad-2366-4e8c-a739-96e80fa62a48","Type":"ContainerDied","Data":"9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65"} Nov 11 15:08:40 crc kubenswrapper[4842]: I1111 15:08:40.905391 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dl98h" event={"ID":"bd1086ad-2366-4e8c-a739-96e80fa62a48","Type":"ContainerStarted","Data":"472105944dacb11006818630a52765af7e72baf9d182ab4726f83ab8fccdda71"} Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.228635 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-gt9rm"] Nov 11 15:08:41 crc kubenswrapper[4842]: E1111 15:08:41.229352 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c592a159-5dfe-4bfc-a49a-234284cf80c0" containerName="container-00" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.229367 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c592a159-5dfe-4bfc-a49a-234284cf80c0" containerName="container-00" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.229593 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c592a159-5dfe-4bfc-a49a-234284cf80c0" containerName="container-00" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.230460 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.325164 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzktn\" (UniqueName: \"kubernetes.io/projected/868ef198-f506-47fc-a2ca-bf37e7f75330-kube-api-access-rzktn\") pod \"crc-debug-gt9rm\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.325266 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/868ef198-f506-47fc-a2ca-bf37e7f75330-host\") pod \"crc-debug-gt9rm\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.426856 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/868ef198-f506-47fc-a2ca-bf37e7f75330-host\") pod \"crc-debug-gt9rm\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.426984 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/868ef198-f506-47fc-a2ca-bf37e7f75330-host\") pod \"crc-debug-gt9rm\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.427372 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzktn\" (UniqueName: \"kubernetes.io/projected/868ef198-f506-47fc-a2ca-bf37e7f75330-kube-api-access-rzktn\") pod \"crc-debug-gt9rm\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.456229 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzktn\" (UniqueName: \"kubernetes.io/projected/868ef198-f506-47fc-a2ca-bf37e7f75330-kube-api-access-rzktn\") pod \"crc-debug-gt9rm\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.549544 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:41 crc kubenswrapper[4842]: W1111 15:08:41.580862 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod868ef198_f506_47fc_a2ca_bf37e7f75330.slice/crio-dd09e721c669614e476c82679f14f4153461d35a9240f148be255a35beda6ff3 WatchSource:0}: Error finding container dd09e721c669614e476c82679f14f4153461d35a9240f148be255a35beda6ff3: Status 404 returned error can't find the container with id dd09e721c669614e476c82679f14f4153461d35a9240f148be255a35beda6ff3 Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.917638 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dl98h" event={"ID":"bd1086ad-2366-4e8c-a739-96e80fa62a48","Type":"ContainerStarted","Data":"1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83"} Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.919820 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" event={"ID":"868ef198-f506-47fc-a2ca-bf37e7f75330","Type":"ContainerStarted","Data":"517cd8a5663d214df965203757d7e2a2fe4dfe89b1744e4da177ce32425b8b31"} Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.919849 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" event={"ID":"868ef198-f506-47fc-a2ca-bf37e7f75330","Type":"ContainerStarted","Data":"dd09e721c669614e476c82679f14f4153461d35a9240f148be255a35beda6ff3"} Nov 11 15:08:41 crc kubenswrapper[4842]: I1111 15:08:41.948441 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" podStartSLOduration=0.948417724 podStartE2EDuration="948.417724ms" podCreationTimestamp="2025-11-11 15:08:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 15:08:41.946065801 +0000 UTC m=+5932.606355420" watchObservedRunningTime="2025-11-11 15:08:41.948417724 +0000 UTC m=+5932.608707343" Nov 11 15:08:42 crc kubenswrapper[4842]: I1111 15:08:42.070651 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c592a159-5dfe-4bfc-a49a-234284cf80c0" path="/var/lib/kubelet/pods/c592a159-5dfe-4bfc-a49a-234284cf80c0/volumes" Nov 11 15:08:42 crc kubenswrapper[4842]: I1111 15:08:42.930544 4842 generic.go:334] "Generic (PLEG): container finished" podID="868ef198-f506-47fc-a2ca-bf37e7f75330" containerID="517cd8a5663d214df965203757d7e2a2fe4dfe89b1744e4da177ce32425b8b31" exitCode=0 Nov 11 15:08:42 crc kubenswrapper[4842]: I1111 15:08:42.930627 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" event={"ID":"868ef198-f506-47fc-a2ca-bf37e7f75330","Type":"ContainerDied","Data":"517cd8a5663d214df965203757d7e2a2fe4dfe89b1744e4da177ce32425b8b31"} Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.071260 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.191628 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/868ef198-f506-47fc-a2ca-bf37e7f75330-host\") pod \"868ef198-f506-47fc-a2ca-bf37e7f75330\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.191691 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzktn\" (UniqueName: \"kubernetes.io/projected/868ef198-f506-47fc-a2ca-bf37e7f75330-kube-api-access-rzktn\") pod \"868ef198-f506-47fc-a2ca-bf37e7f75330\" (UID: \"868ef198-f506-47fc-a2ca-bf37e7f75330\") " Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.191701 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/868ef198-f506-47fc-a2ca-bf37e7f75330-host" (OuterVolumeSpecName: "host") pod "868ef198-f506-47fc-a2ca-bf37e7f75330" (UID: "868ef198-f506-47fc-a2ca-bf37e7f75330"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.193819 4842 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/868ef198-f506-47fc-a2ca-bf37e7f75330-host\") on node \"crc\" DevicePath \"\"" Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.198127 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/868ef198-f506-47fc-a2ca-bf37e7f75330-kube-api-access-rzktn" (OuterVolumeSpecName: "kube-api-access-rzktn") pod "868ef198-f506-47fc-a2ca-bf37e7f75330" (UID: "868ef198-f506-47fc-a2ca-bf37e7f75330"). InnerVolumeSpecName "kube-api-access-rzktn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.295544 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzktn\" (UniqueName: \"kubernetes.io/projected/868ef198-f506-47fc-a2ca-bf37e7f75330-kube-api-access-rzktn\") on node \"crc\" DevicePath \"\"" Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.325471 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-gt9rm"] Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.333045 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-gt9rm"] Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.953361 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd09e721c669614e476c82679f14f4153461d35a9240f148be255a35beda6ff3" Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.953934 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-gt9rm" Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.961369 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:08:44 crc kubenswrapper[4842]: I1111 15:08:44.961489 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.536832 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-b8ncr"] Nov 11 15:08:45 crc kubenswrapper[4842]: E1111 15:08:45.537655 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="868ef198-f506-47fc-a2ca-bf37e7f75330" containerName="container-00" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.537672 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="868ef198-f506-47fc-a2ca-bf37e7f75330" containerName="container-00" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.537886 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="868ef198-f506-47fc-a2ca-bf37e7f75330" containerName="container-00" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.538644 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.722353 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vk4r9\" (UniqueName: \"kubernetes.io/projected/33b56128-9f3c-4229-ae81-5eea24ea9a57-kube-api-access-vk4r9\") pod \"crc-debug-b8ncr\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.722517 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/33b56128-9f3c-4229-ae81-5eea24ea9a57-host\") pod \"crc-debug-b8ncr\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.824525 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vk4r9\" (UniqueName: \"kubernetes.io/projected/33b56128-9f3c-4229-ae81-5eea24ea9a57-kube-api-access-vk4r9\") pod \"crc-debug-b8ncr\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.824674 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/33b56128-9f3c-4229-ae81-5eea24ea9a57-host\") pod \"crc-debug-b8ncr\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.824780 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/33b56128-9f3c-4229-ae81-5eea24ea9a57-host\") pod 
\"crc-debug-b8ncr\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.842828 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vk4r9\" (UniqueName: \"kubernetes.io/projected/33b56128-9f3c-4229-ae81-5eea24ea9a57-kube-api-access-vk4r9\") pod \"crc-debug-b8ncr\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.858176 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:45 crc kubenswrapper[4842]: I1111 15:08:45.970344 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" event={"ID":"33b56128-9f3c-4229-ae81-5eea24ea9a57","Type":"ContainerStarted","Data":"cfc4848d53c27e82c4aab919e0bf9c4c527a8d91adcee4aa3415a09d8d013d20"} Nov 11 15:08:46 crc kubenswrapper[4842]: I1111 15:08:46.071941 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="868ef198-f506-47fc-a2ca-bf37e7f75330" path="/var/lib/kubelet/pods/868ef198-f506-47fc-a2ca-bf37e7f75330/volumes" Nov 11 15:08:46 crc kubenswrapper[4842]: I1111 15:08:46.981507 4842 generic.go:334] "Generic (PLEG): container finished" podID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerID="1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83" exitCode=0 Nov 11 15:08:46 crc kubenswrapper[4842]: I1111 15:08:46.981576 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dl98h" event={"ID":"bd1086ad-2366-4e8c-a739-96e80fa62a48","Type":"ContainerDied","Data":"1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83"} Nov 11 15:08:46 crc kubenswrapper[4842]: I1111 15:08:46.983942 4842 generic.go:334] "Generic (PLEG): container finished" podID="33b56128-9f3c-4229-ae81-5eea24ea9a57" containerID="c4347be76d2980c756db0988898aead5259790322cdf32205d637b089ed7389f" exitCode=0 Nov 11 15:08:46 crc kubenswrapper[4842]: I1111 15:08:46.983965 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" event={"ID":"33b56128-9f3c-4229-ae81-5eea24ea9a57","Type":"ContainerDied","Data":"c4347be76d2980c756db0988898aead5259790322cdf32205d637b089ed7389f"} Nov 11 15:08:47 crc kubenswrapper[4842]: I1111 15:08:47.040352 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-b8ncr"] Nov 11 15:08:47 crc kubenswrapper[4842]: I1111 15:08:47.048504 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-hhh5t/crc-debug-b8ncr"] Nov 11 15:08:47 crc kubenswrapper[4842]: I1111 15:08:47.996017 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dl98h" event={"ID":"bd1086ad-2366-4e8c-a739-96e80fa62a48","Type":"ContainerStarted","Data":"34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb"} Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.026242 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dl98h" podStartSLOduration=2.295752795 podStartE2EDuration="9.026226511s" podCreationTimestamp="2025-11-11 15:08:39 +0000 UTC" firstStartedPulling="2025-11-11 15:08:40.90717241 +0000 UTC m=+5931.567462029" lastFinishedPulling="2025-11-11 15:08:47.637646106 +0000 UTC m=+5938.297935745" 
observedRunningTime="2025-11-11 15:08:48.024375452 +0000 UTC m=+5938.684665081" watchObservedRunningTime="2025-11-11 15:08:48.026226511 +0000 UTC m=+5938.686516130" Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.131439 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.281949 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/33b56128-9f3c-4229-ae81-5eea24ea9a57-host\") pod \"33b56128-9f3c-4229-ae81-5eea24ea9a57\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.282318 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vk4r9\" (UniqueName: \"kubernetes.io/projected/33b56128-9f3c-4229-ae81-5eea24ea9a57-kube-api-access-vk4r9\") pod \"33b56128-9f3c-4229-ae81-5eea24ea9a57\" (UID: \"33b56128-9f3c-4229-ae81-5eea24ea9a57\") " Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.282082 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33b56128-9f3c-4229-ae81-5eea24ea9a57-host" (OuterVolumeSpecName: "host") pod "33b56128-9f3c-4229-ae81-5eea24ea9a57" (UID: "33b56128-9f3c-4229-ae81-5eea24ea9a57"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.283504 4842 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/33b56128-9f3c-4229-ae81-5eea24ea9a57-host\") on node \"crc\" DevicePath \"\"" Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.295768 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33b56128-9f3c-4229-ae81-5eea24ea9a57-kube-api-access-vk4r9" (OuterVolumeSpecName: "kube-api-access-vk4r9") pod "33b56128-9f3c-4229-ae81-5eea24ea9a57" (UID: "33b56128-9f3c-4229-ae81-5eea24ea9a57"). InnerVolumeSpecName "kube-api-access-vk4r9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:08:48 crc kubenswrapper[4842]: I1111 15:08:48.385476 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vk4r9\" (UniqueName: \"kubernetes.io/projected/33b56128-9f3c-4229-ae81-5eea24ea9a57-kube-api-access-vk4r9\") on node \"crc\" DevicePath \"\"" Nov 11 15:08:49 crc kubenswrapper[4842]: I1111 15:08:49.008336 4842 scope.go:117] "RemoveContainer" containerID="c4347be76d2980c756db0988898aead5259790322cdf32205d637b089ed7389f" Nov 11 15:08:49 crc kubenswrapper[4842]: I1111 15:08:49.009259 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/crc-debug-b8ncr" Nov 11 15:08:49 crc kubenswrapper[4842]: I1111 15:08:49.444987 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:49 crc kubenswrapper[4842]: I1111 15:08:49.445356 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:50 crc kubenswrapper[4842]: I1111 15:08:50.072178 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33b56128-9f3c-4229-ae81-5eea24ea9a57" path="/var/lib/kubelet/pods/33b56128-9f3c-4229-ae81-5eea24ea9a57/volumes" Nov 11 15:08:50 crc kubenswrapper[4842]: I1111 15:08:50.491188 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dl98h" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="registry-server" probeResult="failure" output=< Nov 11 15:08:50 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 15:08:50 crc kubenswrapper[4842]: > Nov 11 15:08:59 crc kubenswrapper[4842]: I1111 15:08:59.514762 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:59 crc kubenswrapper[4842]: I1111 15:08:59.560998 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:08:59 crc kubenswrapper[4842]: I1111 15:08:59.754900 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dl98h"] Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.136548 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dl98h" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="registry-server" containerID="cri-o://34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb" gracePeriod=2 Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.658646 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.691663 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xk5d6\" (UniqueName: \"kubernetes.io/projected/bd1086ad-2366-4e8c-a739-96e80fa62a48-kube-api-access-xk5d6\") pod \"bd1086ad-2366-4e8c-a739-96e80fa62a48\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.691717 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-utilities\") pod \"bd1086ad-2366-4e8c-a739-96e80fa62a48\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.691740 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-catalog-content\") pod \"bd1086ad-2366-4e8c-a739-96e80fa62a48\" (UID: \"bd1086ad-2366-4e8c-a739-96e80fa62a48\") " Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.694185 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-utilities" (OuterVolumeSpecName: "utilities") pod "bd1086ad-2366-4e8c-a739-96e80fa62a48" (UID: "bd1086ad-2366-4e8c-a739-96e80fa62a48"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.702408 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd1086ad-2366-4e8c-a739-96e80fa62a48-kube-api-access-xk5d6" (OuterVolumeSpecName: "kube-api-access-xk5d6") pod "bd1086ad-2366-4e8c-a739-96e80fa62a48" (UID: "bd1086ad-2366-4e8c-a739-96e80fa62a48"). InnerVolumeSpecName "kube-api-access-xk5d6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.790612 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd1086ad-2366-4e8c-a739-96e80fa62a48" (UID: "bd1086ad-2366-4e8c-a739-96e80fa62a48"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.793954 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xk5d6\" (UniqueName: \"kubernetes.io/projected/bd1086ad-2366-4e8c-a739-96e80fa62a48-kube-api-access-xk5d6\") on node \"crc\" DevicePath \"\"" Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.793987 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 15:09:01 crc kubenswrapper[4842]: I1111 15:09:01.793998 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd1086ad-2366-4e8c-a739-96e80fa62a48-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.146289 4842 generic.go:334] "Generic (PLEG): container finished" podID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerID="34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb" exitCode=0 Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.146374 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dl98h" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.146372 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dl98h" event={"ID":"bd1086ad-2366-4e8c-a739-96e80fa62a48","Type":"ContainerDied","Data":"34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb"} Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.147529 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dl98h" event={"ID":"bd1086ad-2366-4e8c-a739-96e80fa62a48","Type":"ContainerDied","Data":"472105944dacb11006818630a52765af7e72baf9d182ab4726f83ab8fccdda71"} Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.147555 4842 scope.go:117] "RemoveContainer" containerID="34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.173543 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dl98h"] Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.183298 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dl98h"] Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.183547 4842 scope.go:117] "RemoveContainer" containerID="1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.202881 4842 scope.go:117] "RemoveContainer" containerID="9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.251160 4842 scope.go:117] "RemoveContainer" containerID="34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb" Nov 11 15:09:02 crc kubenswrapper[4842]: E1111 15:09:02.251675 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb\": container with ID starting with 34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb not found: ID does not exist" containerID="34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.251733 4842 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb"} err="failed to get container status \"34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb\": rpc error: code = NotFound desc = could not find container \"34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb\": container with ID starting with 34b56206d2688b9b6c2a4dd79e35732529200b0ec1c8925195ea02cc6bbf99bb not found: ID does not exist" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.251765 4842 scope.go:117] "RemoveContainer" containerID="1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83" Nov 11 15:09:02 crc kubenswrapper[4842]: E1111 15:09:02.252187 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83\": container with ID starting with 1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83 not found: ID does not exist" containerID="1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.252222 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83"} err="failed to get container status \"1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83\": rpc error: code = NotFound desc = could not find container \"1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83\": container with ID starting with 1ecacc11addde618881f0f53a10fb16876f1a6f1dd9b9196f3defec1d12d2b83 not found: ID does not exist" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.252245 4842 scope.go:117] "RemoveContainer" containerID="9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65" Nov 11 15:09:02 crc kubenswrapper[4842]: E1111 15:09:02.252611 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65\": container with ID starting with 9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65 not found: ID does not exist" containerID="9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65" Nov 11 15:09:02 crc kubenswrapper[4842]: I1111 15:09:02.252644 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65"} err="failed to get container status \"9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65\": rpc error: code = NotFound desc = could not find container \"9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65\": container with ID starting with 9129f78d525d8f8b2ad4e282f84f046d0f9db8a4662df1397979a8c57596ef65 not found: ID does not exist" Nov 11 15:09:04 crc kubenswrapper[4842]: I1111 15:09:04.071388 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" path="/var/lib/kubelet/pods/bd1086ad-2366-4e8c-a739-96e80fa62a48/volumes" Nov 11 15:09:14 crc kubenswrapper[4842]: I1111 15:09:14.960812 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:09:14 crc kubenswrapper[4842]: I1111 15:09:14.961863 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:09:44 crc kubenswrapper[4842]: I1111 15:09:44.961154 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:09:44 crc kubenswrapper[4842]: I1111 15:09:44.961674 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:09:44 crc kubenswrapper[4842]: I1111 15:09:44.961724 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 15:09:44 crc kubenswrapper[4842]: I1111 15:09:44.962612 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 15:09:44 crc kubenswrapper[4842]: I1111 15:09:44.962666 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" gracePeriod=600 Nov 11 15:09:45 crc kubenswrapper[4842]: E1111 15:09:45.085631 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:09:45 crc kubenswrapper[4842]: I1111 15:09:45.625039 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" exitCode=0 Nov 11 15:09:45 crc kubenswrapper[4842]: I1111 15:09:45.625129 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c"} Nov 11 15:09:45 crc kubenswrapper[4842]: I1111 15:09:45.625398 4842 scope.go:117] "RemoveContainer" containerID="868a3f2f3497955b368e456ab0f14696840fd159648bafcf039bcce3bcc3c0ab" Nov 11 15:09:45 crc kubenswrapper[4842]: I1111 
15:09:45.626405 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:09:45 crc kubenswrapper[4842]: E1111 15:09:45.626857 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:09:57 crc kubenswrapper[4842]: I1111 15:09:57.061149 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:09:57 crc kubenswrapper[4842]: E1111 15:09:57.062017 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:10:00 crc kubenswrapper[4842]: I1111 15:10:00.282457 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-95f8fc9b8-pc2pp_b13026a5-f118-43d9-b363-84f9ae14379c/barbican-api/0.log" Nov 11 15:10:00 crc kubenswrapper[4842]: I1111 15:10:00.381481 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-95f8fc9b8-pc2pp_b13026a5-f118-43d9-b363-84f9ae14379c/barbican-api-log/0.log" Nov 11 15:10:00 crc kubenswrapper[4842]: I1111 15:10:00.529599 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5dfc9c458b-t66x8_e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef/barbican-keystone-listener/0.log" Nov 11 15:10:00 crc kubenswrapper[4842]: I1111 15:10:00.599963 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5dfc9c458b-t66x8_e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef/barbican-keystone-listener-log/0.log" Nov 11 15:10:00 crc kubenswrapper[4842]: I1111 15:10:00.657449 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b56b7d577-bj4cr_c1fe7c75-ea0d-41ed-b79a-7ecce3779047/barbican-worker/0.log" Nov 11 15:10:00 crc kubenswrapper[4842]: I1111 15:10:00.736967 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b56b7d577-bj4cr_c1fe7c75-ea0d-41ed-b79a-7ecce3779047/barbican-worker-log/0.log" Nov 11 15:10:00 crc kubenswrapper[4842]: I1111 15:10:00.844768 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2_cbeea580-daef-4e97-898b-c194a52a4e97/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.035928 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/ceilometer-central-agent/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.075500 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/proxy-httpd/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.118224 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/ceilometer-notification-agent/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.159383 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/sg-core/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.347401 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_317f46b7-2e01-425e-8d9a-7df1c63a0d34/cinder-api-log/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.656612 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_07ee075d-6090-4f91-9908-223be5beff86/probe/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.760935 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_317f46b7-2e01-425e-8d9a-7df1c63a0d34/cinder-api/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.838516 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_07ee075d-6090-4f91-9908-223be5beff86/cinder-backup/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.897176 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5eec4bcc-3000-4c55-99ea-0bee19c6fa86/cinder-scheduler/0.log" Nov 11 15:10:01 crc kubenswrapper[4842]: I1111 15:10:01.973702 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5eec4bcc-3000-4c55-99ea-0bee19c6fa86/probe/0.log" Nov 11 15:10:02 crc kubenswrapper[4842]: I1111 15:10:02.182729 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_3c06a622-086f-4df5-beaa-67d62802c249/probe/0.log" Nov 11 15:10:02 crc kubenswrapper[4842]: I1111 15:10:02.209798 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_3c06a622-086f-4df5-beaa-67d62802c249/cinder-volume/0.log" Nov 11 15:10:02 crc kubenswrapper[4842]: I1111 15:10:02.425850 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_51936d85-49d4-4413-b8f0-0c582381a663/cinder-volume/0.log" Nov 11 15:10:02 crc kubenswrapper[4842]: I1111 15:10:02.434244 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_51936d85-49d4-4413-b8f0-0c582381a663/probe/0.log" Nov 11 15:10:02 crc kubenswrapper[4842]: I1111 15:10:02.504124 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-79s62_53ccc159-9d7f-41b0-8f5d-bc9521be7f1a/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:02 crc kubenswrapper[4842]: I1111 15:10:02.735915 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-btvfg_f94c8fa3-0b93-4cf3-9aae-9feb9bc79273/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:02 crc kubenswrapper[4842]: I1111 15:10:02.749277 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-846db6f4bf-jr7k5_e6ec519f-08c0-4b5d-a698-3ecb9933613f/init/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.015159 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-846db6f4bf-jr7k5_e6ec519f-08c0-4b5d-a698-3ecb9933613f/init/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.133742 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-wfksg_77d635ff-fb62-482a-b81e-18a8e371d404/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.188977 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-846db6f4bf-jr7k5_e6ec519f-08c0-4b5d-a698-3ecb9933613f/dnsmasq-dns/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.347205 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_069ae2bc-eee5-4b02-9dd7-602303027ee4/glance-httpd/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.386712 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_069ae2bc-eee5-4b02-9dd7-602303027ee4/glance-log/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.549955 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_762a07fe-46d0-4852-bfef-6ed8007dcd63/glance-httpd/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.615014 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_762a07fe-46d0-4852-bfef-6ed8007dcd63/glance-log/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.782940 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7ccc6f5856-tt6gw_dde43d8b-9a6f-4506-9285-0606a6e04361/horizon/0.log" Nov 11 15:10:03 crc kubenswrapper[4842]: I1111 15:10:03.882493 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs_7252c6dc-fc9e-44ee-bea7-1b61760f4f8e/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:04 crc kubenswrapper[4842]: I1111 15:10:04.080188 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-8drsm_a6455983-1479-4b83-a9ba-2aef71382fc7/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:04 crc kubenswrapper[4842]: I1111 15:10:04.545065 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7ccc6f5856-tt6gw_dde43d8b-9a6f-4506-9285-0606a6e04361/horizon-log/0.log" Nov 11 15:10:04 crc kubenswrapper[4842]: I1111 15:10:04.577294 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29381161-b8tsl_4401a195-a0c5-46b6-9b52-8e83c88ef55d/keystone-cron/0.log" Nov 11 15:10:04 crc kubenswrapper[4842]: I1111 15:10:04.757706 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7c6ccd957-kmlcp_e5ae2f35-b0d7-480b-8f4d-cda875e63ec2/keystone-api/0.log" Nov 11 15:10:04 crc kubenswrapper[4842]: I1111 15:10:04.835390 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_5f2c2abf-63ce-49fa-a178-57088955a295/kube-state-metrics/0.log" Nov 11 15:10:04 crc kubenswrapper[4842]: I1111 15:10:04.837888 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29381221-6dgbk_5b5757ad-60d5-4159-a919-b2b784bd5072/keystone-cron/0.log" Nov 11 15:10:05 crc kubenswrapper[4842]: I1111 15:10:05.022241 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx_28de897b-72a6-4d7b-b7e7-e205a32fe32d/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:05 crc kubenswrapper[4842]: I1111 15:10:05.527521 4842 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp_576c96ec-4ad4-4eee-ae3b-10b4b4aa5524/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:05 crc kubenswrapper[4842]: I1111 15:10:05.547003 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6cbfd54f69-xg8r8_18c3a0a5-32fd-44f4-8d0e-beb556aab16b/neutron-httpd/0.log" Nov 11 15:10:05 crc kubenswrapper[4842]: I1111 15:10:05.654591 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6cbfd54f69-xg8r8_18c3a0a5-32fd-44f4-8d0e-beb556aab16b/neutron-api/0.log" Nov 11 15:10:06 crc kubenswrapper[4842]: I1111 15:10:06.426024 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_c743e88d-2ca8-45a8-9e26-7106975b5be3/nova-cell0-conductor-conductor/0.log" Nov 11 15:10:06 crc kubenswrapper[4842]: I1111 15:10:06.735147 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49/nova-cell1-conductor-conductor/0.log" Nov 11 15:10:07 crc kubenswrapper[4842]: I1111 15:10:07.061400 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_96d33b5d-2ce7-480d-8e48-1badc4624a2f/nova-cell1-novncproxy-novncproxy/0.log" Nov 11 15:10:07 crc kubenswrapper[4842]: I1111 15:10:07.327158 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-65pxq_908c0c25-452c-4fba-9fbd-d76fa35416af/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:07 crc kubenswrapper[4842]: I1111 15:10:07.572838 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_23d198b8-bfde-44cc-909f-593c5a1968a3/nova-api-log/0.log" Nov 11 15:10:07 crc kubenswrapper[4842]: I1111 15:10:07.598887 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d993b82e-e35e-44d7-9e76-1bae92e42c25/nova-metadata-log/0.log" Nov 11 15:10:07 crc kubenswrapper[4842]: I1111 15:10:07.894874 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_23d198b8-bfde-44cc-909f-593c5a1968a3/nova-api-api/0.log" Nov 11 15:10:08 crc kubenswrapper[4842]: I1111 15:10:08.167028 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_c57355b5-f8b3-463a-933b-fedf7d2886a6/nova-scheduler-scheduler/0.log" Nov 11 15:10:08 crc kubenswrapper[4842]: I1111 15:10:08.186674 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6225291-4a01-43af-ba67-f5281c2bd436/mysql-bootstrap/0.log" Nov 11 15:10:08 crc kubenswrapper[4842]: I1111 15:10:08.375146 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6225291-4a01-43af-ba67-f5281c2bd436/mysql-bootstrap/0.log" Nov 11 15:10:08 crc kubenswrapper[4842]: I1111 15:10:08.472800 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6225291-4a01-43af-ba67-f5281c2bd436/galera/0.log" Nov 11 15:10:08 crc kubenswrapper[4842]: I1111 15:10:08.573088 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca/mysql-bootstrap/0.log" Nov 11 15:10:08 crc kubenswrapper[4842]: I1111 15:10:08.847955 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-galera-0_c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca/mysql-bootstrap/0.log" Nov 11 15:10:08 crc kubenswrapper[4842]: I1111 15:10:08.897567 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca/galera/0.log" Nov 11 15:10:09 crc kubenswrapper[4842]: I1111 15:10:09.059001 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:10:09 crc kubenswrapper[4842]: E1111 15:10:09.059266 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:10:09 crc kubenswrapper[4842]: I1111 15:10:09.085876 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-f7sn5_6ae937ce-ab8b-471f-b809-821ca6f23ecd/ovn-controller/0.log" Nov 11 15:10:09 crc kubenswrapper[4842]: I1111 15:10:09.107280 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_414a02e6-eebe-4988-99fd-1bf1651fa858/openstackclient/0.log" Nov 11 15:10:09 crc kubenswrapper[4842]: I1111 15:10:09.566351 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rlvf6_df77e52b-398e-454a-bbf5-0bac66c17380/openstack-network-exporter/0.log" Nov 11 15:10:09 crc kubenswrapper[4842]: I1111 15:10:09.731479 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d993b82e-e35e-44d7-9e76-1bae92e42c25/nova-metadata-metadata/0.log" Nov 11 15:10:09 crc kubenswrapper[4842]: I1111 15:10:09.742903 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovsdb-server-init/0.log" Nov 11 15:10:09 crc kubenswrapper[4842]: I1111 15:10:09.959238 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovsdb-server-init/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.054638 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovsdb-server/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.257906 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-sqg56_92fb25d5-d93a-4932-8d37-94ca7302c774/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.307622 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e0df8170-d8dd-4ad1-9a30-d60e06fa07f7/openstack-network-exporter/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.358775 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovs-vswitchd/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.509747 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e0df8170-d8dd-4ad1-9a30-d60e06fa07f7/ovn-northd/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.523143 4842 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9f712abc-0d24-4fc2-a103-c102a8833466/openstack-network-exporter/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.605831 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9f712abc-0d24-4fc2-a103-c102a8833466/ovsdbserver-nb/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.723993 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85/openstack-network-exporter/0.log" Nov 11 15:10:10 crc kubenswrapper[4842]: I1111 15:10:10.802315 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85/ovsdbserver-sb/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.150620 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-94c6f6d9b-ns8g4_2494da24-b74f-4317-8bf2-80e0335c5648/placement-api/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.163036 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/init-config-reloader/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.232180 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-94c6f6d9b-ns8g4_2494da24-b74f-4317-8bf2-80e0335c5648/placement-log/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.318654 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/init-config-reloader/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.412837 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/config-reloader/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.482796 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/prometheus/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.517801 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/thanos-sidecar/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.670660 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_426f2645-ec57-40e9-b41f-3d1b38a42d04/setup-container/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.922633 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_426f2645-ec57-40e9-b41f-3d1b38a42d04/setup-container/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.931089 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_426f2645-ec57-40e9-b41f-3d1b38a42d04/rabbitmq/0.log" Nov 11 15:10:11 crc kubenswrapper[4842]: I1111 15:10:11.954814 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_121e4ffa-c7c1-40ef-a668-500b2cc8fba6/setup-container/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.203233 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d18de6e6-d3e2-41fd-83df-a0d75a1fd978/setup-container/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.213965 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-notifications-server-0_121e4ffa-c7c1-40ef-a668-500b2cc8fba6/setup-container/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.234156 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_121e4ffa-c7c1-40ef-a668-500b2cc8fba6/rabbitmq/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.458556 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d18de6e6-d3e2-41fd-83df-a0d75a1fd978/rabbitmq/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.498012 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh_c87f657c-bfbc-4d66-9a66-f751fa8ac3ac/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.501546 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d18de6e6-d3e2-41fd-83df-a0d75a1fd978/setup-container/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.679003 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-wz4g4_c8e3c3e6-73ba-490d-b0f5-c99a557f7129/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:12 crc kubenswrapper[4842]: I1111 15:10:12.763661 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2_50a93092-7567-4563-a8cc-9393aaf10eae/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:13 crc kubenswrapper[4842]: I1111 15:10:13.255218 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-h2rqw_3250570c-99a1-4981-a05a-4ba474ed0ab2/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:13 crc kubenswrapper[4842]: I1111 15:10:13.263374 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-tmvxh_448641d9-e39c-4fe4-bc02-cbf87ea74789/ssh-known-hosts-edpm-deployment/0.log" Nov 11 15:10:13 crc kubenswrapper[4842]: I1111 15:10:13.528517 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6fb8686df5-bdtrx_65b2ac4c-d60a-4926-a3b1-88018ce9c369/proxy-server/0.log" Nov 11 15:10:13 crc kubenswrapper[4842]: I1111 15:10:13.675829 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6fb8686df5-bdtrx_65b2ac4c-d60a-4926-a3b1-88018ce9c369/proxy-httpd/0.log" Nov 11 15:10:13 crc kubenswrapper[4842]: I1111 15:10:13.708992 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-z5kzq_8b12adcf-9678-4493-8035-061dcdf98b6e/swift-ring-rebalance/0.log" Nov 11 15:10:13 crc kubenswrapper[4842]: I1111 15:10:13.877024 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-auditor/0.log" Nov 11 15:10:13 crc kubenswrapper[4842]: I1111 15:10:13.899533 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-reaper/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.127262 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-auditor/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.130216 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-server/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.142316 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-replicator/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.245697 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-replicator/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.329516 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-auditor/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.344415 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-updater/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.398186 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-server/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.523248 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-expirer/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.555112 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-server/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.570987 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-replicator/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.681361 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-updater/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.750909 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/rsync/0.log" Nov 11 15:10:14 crc kubenswrapper[4842]: I1111 15:10:14.764699 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/swift-recon-cron/0.log" Nov 11 15:10:15 crc kubenswrapper[4842]: I1111 15:10:15.052649 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_42e4762f-5636-4ea5-914b-142ccc708e6d/tempest-tests-tempest-tests-runner/0.log" Nov 11 15:10:15 crc kubenswrapper[4842]: I1111 15:10:15.067328 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c_c203745d-d249-4515-ac25-d99b78d65d2e/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:15 crc kubenswrapper[4842]: I1111 15:10:15.312674 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-bs62h_b96d38f2-b032-495a-8296-72c06458c86f/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:10:15 crc kubenswrapper[4842]: I1111 15:10:15.326566 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_9103e703-2527-4102-948b-a6f7e05b2e5a/test-operator-logs-container/0.log" Nov 11 15:10:16 crc 
kubenswrapper[4842]: I1111 15:10:16.626979 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_9f38d3ba-5c82-4503-a865-35767c1f1147/watcher-applier/0.log" Nov 11 15:10:16 crc kubenswrapper[4842]: I1111 15:10:16.883591 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_bad883a9-7045-46c2-8358-aa3a6d8f7f01/watcher-api-log/0.log" Nov 11 15:10:19 crc kubenswrapper[4842]: I1111 15:10:19.968897 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_3bf4b336-4ea3-4fe5-ad16-5a6047338cf3/watcher-decision-engine/0.log" Nov 11 15:10:20 crc kubenswrapper[4842]: I1111 15:10:20.881539 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_bad883a9-7045-46c2-8358-aa3a6d8f7f01/watcher-api/0.log" Nov 11 15:10:22 crc kubenswrapper[4842]: I1111 15:10:22.060168 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:10:22 crc kubenswrapper[4842]: E1111 15:10:22.060720 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:10:25 crc kubenswrapper[4842]: I1111 15:10:25.333487 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_fd2b40ae-3270-4f5b-9700-026adaf919ca/memcached/0.log" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.614443 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x6szf"] Nov 11 15:10:33 crc kubenswrapper[4842]: E1111 15:10:33.615342 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="extract-content" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.615354 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="extract-content" Nov 11 15:10:33 crc kubenswrapper[4842]: E1111 15:10:33.615384 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="extract-utilities" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.615390 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="extract-utilities" Nov 11 15:10:33 crc kubenswrapper[4842]: E1111 15:10:33.615396 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33b56128-9f3c-4229-ae81-5eea24ea9a57" containerName="container-00" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.615403 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="33b56128-9f3c-4229-ae81-5eea24ea9a57" containerName="container-00" Nov 11 15:10:33 crc kubenswrapper[4842]: E1111 15:10:33.615413 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="registry-server" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.615419 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="registry-server" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.615652 4842 
memory_manager.go:354] "RemoveStaleState removing state" podUID="33b56128-9f3c-4229-ae81-5eea24ea9a57" containerName="container-00" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.615670 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd1086ad-2366-4e8c-a739-96e80fa62a48" containerName="registry-server" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.617157 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.634882 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x6szf"] Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.709010 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-catalog-content\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.709064 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rq6hd\" (UniqueName: \"kubernetes.io/projected/6f84c2ca-9f50-4073-b058-7a5347f97cdc-kube-api-access-rq6hd\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.709201 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-utilities\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.810874 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-catalog-content\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.810919 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rq6hd\" (UniqueName: \"kubernetes.io/projected/6f84c2ca-9f50-4073-b058-7a5347f97cdc-kube-api-access-rq6hd\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.810948 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-utilities\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.811395 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-catalog-content\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc 
kubenswrapper[4842]: I1111 15:10:33.811510 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-utilities\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.837477 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rq6hd\" (UniqueName: \"kubernetes.io/projected/6f84c2ca-9f50-4073-b058-7a5347f97cdc-kube-api-access-rq6hd\") pod \"certified-operators-x6szf\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:33 crc kubenswrapper[4842]: I1111 15:10:33.934118 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:34 crc kubenswrapper[4842]: I1111 15:10:34.465296 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x6szf"] Nov 11 15:10:34 crc kubenswrapper[4842]: I1111 15:10:34.584039 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x6szf" event={"ID":"6f84c2ca-9f50-4073-b058-7a5347f97cdc","Type":"ContainerStarted","Data":"d74cd4cfd225f67aaee90b9073641f973cfac18095fb5f6de151dc9e0db77b1c"} Nov 11 15:10:35 crc kubenswrapper[4842]: I1111 15:10:35.059715 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:10:35 crc kubenswrapper[4842]: E1111 15:10:35.060383 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:10:35 crc kubenswrapper[4842]: I1111 15:10:35.593825 4842 generic.go:334] "Generic (PLEG): container finished" podID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerID="93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b" exitCode=0 Nov 11 15:10:35 crc kubenswrapper[4842]: I1111 15:10:35.593878 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x6szf" event={"ID":"6f84c2ca-9f50-4073-b058-7a5347f97cdc","Type":"ContainerDied","Data":"93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b"} Nov 11 15:10:36 crc kubenswrapper[4842]: I1111 15:10:36.604929 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x6szf" event={"ID":"6f84c2ca-9f50-4073-b058-7a5347f97cdc","Type":"ContainerStarted","Data":"5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556"} Nov 11 15:10:37 crc kubenswrapper[4842]: I1111 15:10:37.616462 4842 generic.go:334] "Generic (PLEG): container finished" podID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerID="5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556" exitCode=0 Nov 11 15:10:37 crc kubenswrapper[4842]: I1111 15:10:37.616506 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x6szf" 
event={"ID":"6f84c2ca-9f50-4073-b058-7a5347f97cdc","Type":"ContainerDied","Data":"5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556"} Nov 11 15:10:38 crc kubenswrapper[4842]: I1111 15:10:38.629308 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x6szf" event={"ID":"6f84c2ca-9f50-4073-b058-7a5347f97cdc","Type":"ContainerStarted","Data":"36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a"} Nov 11 15:10:38 crc kubenswrapper[4842]: I1111 15:10:38.649173 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x6szf" podStartSLOduration=3.106750827 podStartE2EDuration="5.649153561s" podCreationTimestamp="2025-11-11 15:10:33 +0000 UTC" firstStartedPulling="2025-11-11 15:10:35.595888572 +0000 UTC m=+6046.256178181" lastFinishedPulling="2025-11-11 15:10:38.138291276 +0000 UTC m=+6048.798580915" observedRunningTime="2025-11-11 15:10:38.643148803 +0000 UTC m=+6049.303438442" watchObservedRunningTime="2025-11-11 15:10:38.649153561 +0000 UTC m=+6049.309443200" Nov 11 15:10:43 crc kubenswrapper[4842]: I1111 15:10:43.934391 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:43 crc kubenswrapper[4842]: I1111 15:10:43.934994 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:43 crc kubenswrapper[4842]: I1111 15:10:43.978668 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:44 crc kubenswrapper[4842]: I1111 15:10:44.729020 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:44 crc kubenswrapper[4842]: I1111 15:10:44.774736 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x6szf"] Nov 11 15:10:45 crc kubenswrapper[4842]: I1111 15:10:45.640532 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6999776966-pnbdh_b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e/kube-rbac-proxy/0.log" Nov 11 15:10:45 crc kubenswrapper[4842]: I1111 15:10:45.659348 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6999776966-pnbdh_b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e/manager/0.log" Nov 11 15:10:45 crc kubenswrapper[4842]: I1111 15:10:45.862044 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8dffd86b7-rldzd_b7462081-0162-4bd6-96fe-23a8c29df0db/kube-rbac-proxy/0.log" Nov 11 15:10:45 crc kubenswrapper[4842]: I1111 15:10:45.997808 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8dffd86b7-rldzd_b7462081-0162-4bd6-96fe-23a8c29df0db/manager/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.089411 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-67455b77fb-8g2hw_c0dc7222-a511-4010-b7ad-f1d4716958f8/kube-rbac-proxy/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.149006 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_designate-operator-controller-manager-67455b77fb-8g2hw_c0dc7222-a511-4010-b7ad-f1d4716958f8/manager/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.207356 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/util/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.400668 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/pull/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.441032 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/util/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.455885 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/pull/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.646019 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/util/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.679873 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/extract/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.697895 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x6szf" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="registry-server" containerID="cri-o://36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a" gracePeriod=2 Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.701133 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/pull/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.945625 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-774b65955b-mtvmd_7c7223fc-d7fe-416d-8c4f-872f399ad3f3/kube-rbac-proxy/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.947674 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6b57d4f86f-pkdl8_00cc5552-7130-40ca-ab43-b6525d3199f4/kube-rbac-proxy/0.log" Nov 11 15:10:46 crc kubenswrapper[4842]: I1111 15:10:46.970291 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-774b65955b-mtvmd_7c7223fc-d7fe-416d-8c4f-872f399ad3f3/manager/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.171461 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6b57d4f86f-pkdl8_00cc5552-7130-40ca-ab43-b6525d3199f4/manager/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.225303 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.232610 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d445c6d8b-bqk7b_d200f269-63a1-4cee-820f-1b42538f1fb9/manager/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.289603 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rq6hd\" (UniqueName: \"kubernetes.io/projected/6f84c2ca-9f50-4073-b058-7a5347f97cdc-kube-api-access-rq6hd\") pod \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.289687 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-utilities\") pod \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.289783 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-catalog-content\") pod \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\" (UID: \"6f84c2ca-9f50-4073-b058-7a5347f97cdc\") " Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.292018 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-utilities" (OuterVolumeSpecName: "utilities") pod "6f84c2ca-9f50-4073-b058-7a5347f97cdc" (UID: "6f84c2ca-9f50-4073-b058-7a5347f97cdc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.301967 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f84c2ca-9f50-4073-b058-7a5347f97cdc-kube-api-access-rq6hd" (OuterVolumeSpecName: "kube-api-access-rq6hd") pod "6f84c2ca-9f50-4073-b058-7a5347f97cdc" (UID: "6f84c2ca-9f50-4073-b058-7a5347f97cdc"). InnerVolumeSpecName "kube-api-access-rq6hd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.303135 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d445c6d8b-bqk7b_d200f269-63a1-4cee-820f-1b42538f1fb9/kube-rbac-proxy/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.355251 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f84c2ca-9f50-4073-b058-7a5347f97cdc" (UID: "6f84c2ca-9f50-4073-b058-7a5347f97cdc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.393468 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rq6hd\" (UniqueName: \"kubernetes.io/projected/6f84c2ca-9f50-4073-b058-7a5347f97cdc-kube-api-access-rq6hd\") on node \"crc\" DevicePath \"\"" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.393523 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.393534 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f84c2ca-9f50-4073-b058-7a5347f97cdc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.451089 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-64cbcd8bcf-b9q8b_6c3ed8b6-85b5-402b-994a-ca068cc5a357/kube-rbac-proxy/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.639259 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-8444f8f688-gl575_8976057b-f908-4295-93a2-0bd3bb1441da/kube-rbac-proxy/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.684372 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-64cbcd8bcf-b9q8b_6c3ed8b6-85b5-402b-994a-ca068cc5a357/manager/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.706661 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-8444f8f688-gl575_8976057b-f908-4295-93a2-0bd3bb1441da/manager/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.710534 4842 generic.go:334] "Generic (PLEG): container finished" podID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerID="36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a" exitCode=0 Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.710635 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x6szf" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.710799 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x6szf" event={"ID":"6f84c2ca-9f50-4073-b058-7a5347f97cdc","Type":"ContainerDied","Data":"36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a"} Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.710897 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x6szf" event={"ID":"6f84c2ca-9f50-4073-b058-7a5347f97cdc","Type":"ContainerDied","Data":"d74cd4cfd225f67aaee90b9073641f973cfac18095fb5f6de151dc9e0db77b1c"} Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.710976 4842 scope.go:117] "RemoveContainer" containerID="36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.731917 4842 scope.go:117] "RemoveContainer" containerID="5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.762184 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x6szf"] Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.769819 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x6szf"] Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.782600 4842 scope.go:117] "RemoveContainer" containerID="93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.827401 4842 scope.go:117] "RemoveContainer" containerID="36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a" Nov 11 15:10:47 crc kubenswrapper[4842]: E1111 15:10:47.832507 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a\": container with ID starting with 36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a not found: ID does not exist" containerID="36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.832554 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a"} err="failed to get container status \"36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a\": rpc error: code = NotFound desc = could not find container \"36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a\": container with ID starting with 36656e708def50f29340f0b1b5cd420598ce7f8fe19a7b27663fa58bc8c8f91a not found: ID does not exist" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.832586 4842 scope.go:117] "RemoveContainer" containerID="5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556" Nov 11 15:10:47 crc kubenswrapper[4842]: E1111 15:10:47.833363 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556\": container with ID starting with 5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556 not found: ID does not exist" containerID="5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.833422 4842 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556"} err="failed to get container status \"5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556\": rpc error: code = NotFound desc = could not find container \"5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556\": container with ID starting with 5ecea0a0790bd4cc9c4860d2edfbcb818535ca6b70da6c458b3d8cd0c0e01556 not found: ID does not exist" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.833448 4842 scope.go:117] "RemoveContainer" containerID="93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b" Nov 11 15:10:47 crc kubenswrapper[4842]: E1111 15:10:47.833773 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b\": container with ID starting with 93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b not found: ID does not exist" containerID="93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.833813 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b"} err="failed to get container status \"93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b\": rpc error: code = NotFound desc = could not find container \"93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b\": container with ID starting with 93b6d0db9c6bb768fff2530a07280f89e0d6f3622bcf6278d60c937a38b00c4b not found: ID does not exist" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.907989 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5c68d88c57-2k92j_d90f01be-5138-44bc-8330-0e8ee3914ba8/kube-rbac-proxy/0.log" Nov 11 15:10:47 crc kubenswrapper[4842]: I1111 15:10:47.965015 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5c68d88c57-2k92j_d90f01be-5138-44bc-8330-0e8ee3914ba8/manager/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.023928 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67c5b7495b-2sfch_fae3f9e9-7308-454c-80e2-c836cfa04a44/kube-rbac-proxy/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.059672 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:10:48 crc kubenswrapper[4842]: E1111 15:10:48.060040 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.072590 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" path="/var/lib/kubelet/pods/6f84c2ca-9f50-4073-b058-7a5347f97cdc/volumes" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.115724 4842 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67c5b7495b-2sfch_fae3f9e9-7308-454c-80e2-c836cfa04a44/manager/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.208942 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-697bcb486c-xcdsm_20a746aa-153e-4ad3-afb7-e5d771927b18/kube-rbac-proxy/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.247327 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-697bcb486c-xcdsm_20a746aa-153e-4ad3-afb7-e5d771927b18/manager/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.340438 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-fdd8575d6-rqzfb_d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5/kube-rbac-proxy/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.481495 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-fdd8575d6-rqzfb_d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5/manager/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.589142 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-8588b44bb6-2m4gd_828ba013-e0fe-452c-a8ae-2dbb8e9436b4/kube-rbac-proxy/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.645634 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-8588b44bb6-2m4gd_828ba013-e0fe-452c-a8ae-2dbb8e9436b4/manager/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.766699 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-97dc668d8-scbz4_8458ea94-f568-498e-9f67-f1a31cdb2fdf/kube-rbac-proxy/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.793660 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-97dc668d8-scbz4_8458ea94-f568-498e-9f67-f1a31cdb2fdf/manager/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.900941 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-54948dd897l2jn8_3f8fa56e-98d6-4af9-9ea6-13917e0c5aee/kube-rbac-proxy/0.log" Nov 11 15:10:48 crc kubenswrapper[4842]: I1111 15:10:48.996432 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-54948dd897l2jn8_3f8fa56e-98d6-4af9-9ea6-13917e0c5aee/manager/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.056621 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56b55c68d5-v2ffq_74d23e18-019f-4f09-9011-8d495ff3c70b/kube-rbac-proxy/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.243653 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77d445568-tvtkj_e928b7e0-ef80-4622-9bec-93c14a6c734d/kube-rbac-proxy/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.475038 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-clx45_249af774-2a96-4177-bede-702ebe9025c9/registry-server/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.512624 
4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77d445568-tvtkj_e928b7e0-ef80-4622-9bec-93c14a6c734d/operator/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.633153 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6559d764b4-ntbtw_85789962-b64f-422a-a2b4-4f98a786be81/kube-rbac-proxy/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.729003 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6559d764b4-ntbtw_85789962-b64f-422a-a2b4-4f98a786be81/manager/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.870709 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-776bc4cb49-lh5x5_0b0bd151-ad85-46db-8425-fe640a956d01/kube-rbac-proxy/0.log" Nov 11 15:10:49 crc kubenswrapper[4842]: I1111 15:10:49.908656 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-776bc4cb49-lh5x5_0b0bd151-ad85-46db-8425-fe640a956d01/manager/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.109607 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-phmkh_dcb386ac-da43-4629-a57f-1d272c31bd46/operator/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.124443 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-57cf4f487c-8hwbs_79eedf2f-0af7-46fa-aa0e-7d965ee918d3/kube-rbac-proxy/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.252609 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-57cf4f487c-8hwbs_79eedf2f-0af7-46fa-aa0e-7d965ee918d3/manager/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.379355 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5cc784f744-5p2r8_f179c06b-83ea-4ece-b789-7bb5d75e05d5/kube-rbac-proxy/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.592506 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-66ff8cb84f-nqlnk_0f38813f-c55c-43d3-94bd-3ee9152e3db3/kube-rbac-proxy/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.629705 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-66ff8cb84f-nqlnk_0f38813f-c55c-43d3-94bd-3ee9152e3db3/manager/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.713045 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5cc784f744-5p2r8_f179c06b-83ea-4ece-b789-7bb5d75e05d5/manager/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.770414 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56b55c68d5-v2ffq_74d23e18-019f-4f09-9011-8d495ff3c70b/manager/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 15:10:50.842361 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6c495746fb-mgjxt_cfcfc6be-d566-4ba4-87e9-6157d249adc0/kube-rbac-proxy/0.log" Nov 11 15:10:50 crc kubenswrapper[4842]: I1111 
15:10:50.877830 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6c495746fb-mgjxt_cfcfc6be-d566-4ba4-87e9-6157d249adc0/manager/0.log" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.497668 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-klvsk"] Nov 11 15:10:58 crc kubenswrapper[4842]: E1111 15:10:58.498960 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="registry-server" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.498978 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="registry-server" Nov 11 15:10:58 crc kubenswrapper[4842]: E1111 15:10:58.499003 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="extract-utilities" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.499010 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="extract-utilities" Nov 11 15:10:58 crc kubenswrapper[4842]: E1111 15:10:58.499079 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="extract-content" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.499087 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="extract-content" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.499439 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f84c2ca-9f50-4073-b058-7a5347f97cdc" containerName="registry-server" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.501399 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.509039 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-klvsk"] Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.608477 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-catalog-content\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.608545 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-utilities\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.608662 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf5rw\" (UniqueName: \"kubernetes.io/projected/cf1e4df3-235e-47a3-a016-181cc55e89b3-kube-api-access-lf5rw\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.710717 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-catalog-content\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.711235 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-utilities\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.711182 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-catalog-content\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.711492 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-utilities\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.711634 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf5rw\" (UniqueName: \"kubernetes.io/projected/cf1e4df3-235e-47a3-a016-181cc55e89b3-kube-api-access-lf5rw\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.731456 4842 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-lf5rw\" (UniqueName: \"kubernetes.io/projected/cf1e4df3-235e-47a3-a016-181cc55e89b3-kube-api-access-lf5rw\") pod \"redhat-marketplace-klvsk\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:58 crc kubenswrapper[4842]: I1111 15:10:58.826765 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:10:59 crc kubenswrapper[4842]: I1111 15:10:59.343405 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-klvsk"] Nov 11 15:10:59 crc kubenswrapper[4842]: I1111 15:10:59.836044 4842 generic.go:334] "Generic (PLEG): container finished" podID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerID="7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0" exitCode=0 Nov 11 15:10:59 crc kubenswrapper[4842]: I1111 15:10:59.836153 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klvsk" event={"ID":"cf1e4df3-235e-47a3-a016-181cc55e89b3","Type":"ContainerDied","Data":"7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0"} Nov 11 15:10:59 crc kubenswrapper[4842]: I1111 15:10:59.836610 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klvsk" event={"ID":"cf1e4df3-235e-47a3-a016-181cc55e89b3","Type":"ContainerStarted","Data":"c22f101cc2f742cdbf516b5bd913ea3c5cf5eca08f082e6492951b4eb5c819a6"} Nov 11 15:11:01 crc kubenswrapper[4842]: I1111 15:11:01.854035 4842 generic.go:334] "Generic (PLEG): container finished" podID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerID="c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5" exitCode=0 Nov 11 15:11:01 crc kubenswrapper[4842]: I1111 15:11:01.854086 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klvsk" event={"ID":"cf1e4df3-235e-47a3-a016-181cc55e89b3","Type":"ContainerDied","Data":"c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5"} Nov 11 15:11:02 crc kubenswrapper[4842]: I1111 15:11:02.059535 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:11:02 crc kubenswrapper[4842]: E1111 15:11:02.059801 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:11:02 crc kubenswrapper[4842]: I1111 15:11:02.865769 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klvsk" event={"ID":"cf1e4df3-235e-47a3-a016-181cc55e89b3","Type":"ContainerStarted","Data":"3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d"} Nov 11 15:11:02 crc kubenswrapper[4842]: I1111 15:11:02.890480 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-klvsk" podStartSLOduration=2.406217644 podStartE2EDuration="4.89045805s" podCreationTimestamp="2025-11-11 15:10:58 +0000 UTC" firstStartedPulling="2025-11-11 15:10:59.838563044 +0000 UTC m=+6070.498852653" lastFinishedPulling="2025-11-11 
15:11:02.32280344 +0000 UTC m=+6072.983093059" observedRunningTime="2025-11-11 15:11:02.88058049 +0000 UTC m=+6073.540870109" watchObservedRunningTime="2025-11-11 15:11:02.89045805 +0000 UTC m=+6073.550747669" Nov 11 15:11:06 crc kubenswrapper[4842]: I1111 15:11:06.009139 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-6tqv7_e4ac96a0-0de8-47d1-b101-4054af9c7fe0/control-plane-machine-set-operator/0.log" Nov 11 15:11:06 crc kubenswrapper[4842]: I1111 15:11:06.150218 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-97v95_0c277b36-785e-4e8f-828e-17e36dac70be/kube-rbac-proxy/0.log" Nov 11 15:11:06 crc kubenswrapper[4842]: I1111 15:11:06.211337 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-97v95_0c277b36-785e-4e8f-828e-17e36dac70be/machine-api-operator/0.log" Nov 11 15:11:08 crc kubenswrapper[4842]: I1111 15:11:08.828180 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:11:08 crc kubenswrapper[4842]: I1111 15:11:08.828502 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:11:08 crc kubenswrapper[4842]: I1111 15:11:08.884227 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:11:08 crc kubenswrapper[4842]: I1111 15:11:08.970446 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:11:09 crc kubenswrapper[4842]: I1111 15:11:09.118236 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-klvsk"] Nov 11 15:11:10 crc kubenswrapper[4842]: I1111 15:11:10.941931 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-klvsk" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="registry-server" containerID="cri-o://3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d" gracePeriod=2 Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.415090 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.579160 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lf5rw\" (UniqueName: \"kubernetes.io/projected/cf1e4df3-235e-47a3-a016-181cc55e89b3-kube-api-access-lf5rw\") pod \"cf1e4df3-235e-47a3-a016-181cc55e89b3\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.579236 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-utilities\") pod \"cf1e4df3-235e-47a3-a016-181cc55e89b3\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.579356 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-catalog-content\") pod \"cf1e4df3-235e-47a3-a016-181cc55e89b3\" (UID: \"cf1e4df3-235e-47a3-a016-181cc55e89b3\") " Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.580251 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-utilities" (OuterVolumeSpecName: "utilities") pod "cf1e4df3-235e-47a3-a016-181cc55e89b3" (UID: "cf1e4df3-235e-47a3-a016-181cc55e89b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.594844 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf1e4df3-235e-47a3-a016-181cc55e89b3" (UID: "cf1e4df3-235e-47a3-a016-181cc55e89b3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.600990 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf1e4df3-235e-47a3-a016-181cc55e89b3-kube-api-access-lf5rw" (OuterVolumeSpecName: "kube-api-access-lf5rw") pod "cf1e4df3-235e-47a3-a016-181cc55e89b3" (UID: "cf1e4df3-235e-47a3-a016-181cc55e89b3"). InnerVolumeSpecName "kube-api-access-lf5rw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.681996 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lf5rw\" (UniqueName: \"kubernetes.io/projected/cf1e4df3-235e-47a3-a016-181cc55e89b3-kube-api-access-lf5rw\") on node \"crc\" DevicePath \"\"" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.682032 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.682044 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf1e4df3-235e-47a3-a016-181cc55e89b3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.951812 4842 generic.go:334] "Generic (PLEG): container finished" podID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerID="3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d" exitCode=0 Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.951866 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klvsk" event={"ID":"cf1e4df3-235e-47a3-a016-181cc55e89b3","Type":"ContainerDied","Data":"3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d"} Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.951893 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-klvsk" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.951930 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-klvsk" event={"ID":"cf1e4df3-235e-47a3-a016-181cc55e89b3","Type":"ContainerDied","Data":"c22f101cc2f742cdbf516b5bd913ea3c5cf5eca08f082e6492951b4eb5c819a6"} Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.951952 4842 scope.go:117] "RemoveContainer" containerID="3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.975377 4842 scope.go:117] "RemoveContainer" containerID="c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5" Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.988564 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-klvsk"] Nov 11 15:11:11 crc kubenswrapper[4842]: I1111 15:11:11.997407 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-klvsk"] Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.017690 4842 scope.go:117] "RemoveContainer" containerID="7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0" Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.047160 4842 scope.go:117] "RemoveContainer" containerID="3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d" Nov 11 15:11:12 crc kubenswrapper[4842]: E1111 15:11:12.047600 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d\": container with ID starting with 3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d not found: ID does not exist" containerID="3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d" Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.047645 4842 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d"} err="failed to get container status \"3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d\": rpc error: code = NotFound desc = could not find container \"3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d\": container with ID starting with 3170d6b8514d0f05d692f3108dc87b868e783cd62cfe10b143346f8d7b7e993d not found: ID does not exist" Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.047674 4842 scope.go:117] "RemoveContainer" containerID="c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5" Nov 11 15:11:12 crc kubenswrapper[4842]: E1111 15:11:12.048066 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5\": container with ID starting with c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5 not found: ID does not exist" containerID="c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5" Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.048109 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5"} err="failed to get container status \"c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5\": rpc error: code = NotFound desc = could not find container \"c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5\": container with ID starting with c1a82c0957db9a6da3d9cc8b6f62b2bee79ced9c6f95791c8e44928675a294e5 not found: ID does not exist" Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.048134 4842 scope.go:117] "RemoveContainer" containerID="7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0" Nov 11 15:11:12 crc kubenswrapper[4842]: E1111 15:11:12.048569 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0\": container with ID starting with 7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0 not found: ID does not exist" containerID="7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0" Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.048595 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0"} err="failed to get container status \"7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0\": rpc error: code = NotFound desc = could not find container \"7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0\": container with ID starting with 7f16de8bdf404e927278b8e4e8f026bb86704a4a1558b4f47354a1cff439b4d0 not found: ID does not exist" Nov 11 15:11:12 crc kubenswrapper[4842]: I1111 15:11:12.069890 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" path="/var/lib/kubelet/pods/cf1e4df3-235e-47a3-a016-181cc55e89b3/volumes" Nov 11 15:11:16 crc kubenswrapper[4842]: I1111 15:11:16.067596 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:11:16 crc kubenswrapper[4842]: E1111 15:11:16.068407 4842 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:11:18 crc kubenswrapper[4842]: I1111 15:11:18.761270 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-fwg9f_14c43b93-a309-434e-8379-c48dca27130f/cert-manager-controller/0.log" Nov 11 15:11:18 crc kubenswrapper[4842]: I1111 15:11:18.906291 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-5ks7v_32eb5634-bee2-4ae4-89ad-cad4e90a79d1/cert-manager-cainjector/0.log" Nov 11 15:11:18 crc kubenswrapper[4842]: I1111 15:11:18.942630 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-djwf2_9548e4cc-cf32-4973-bb29-5525fee6d3e8/cert-manager-webhook/0.log" Nov 11 15:11:29 crc kubenswrapper[4842]: I1111 15:11:29.059653 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:11:29 crc kubenswrapper[4842]: E1111 15:11:29.060531 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:11:31 crc kubenswrapper[4842]: I1111 15:11:31.340042 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5859445d84-62bzc_d3481847-dbe0-4b95-ba37-92efb99cbc58/nmstate-console-plugin/0.log" Nov 11 15:11:31 crc kubenswrapper[4842]: I1111 15:11:31.489520 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-4rnrz_d3a048b3-b011-4646-a47f-c51fa6177169/nmstate-handler/0.log" Nov 11 15:11:31 crc kubenswrapper[4842]: I1111 15:11:31.518662 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z9xns_836465ab-91a1-4433-9182-be504f2d4b33/kube-rbac-proxy/0.log" Nov 11 15:11:31 crc kubenswrapper[4842]: I1111 15:11:31.533171 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z9xns_836465ab-91a1-4433-9182-be504f2d4b33/nmstate-metrics/0.log" Nov 11 15:11:31 crc kubenswrapper[4842]: I1111 15:11:31.658386 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-65474b4696-mbppg_473113a5-da7d-4f9f-b6c9-865ed25b03fe/nmstate-operator/0.log" Nov 11 15:11:31 crc kubenswrapper[4842]: I1111 15:11:31.726663 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-jzn5j_34787e6f-0d9b-41f6-8cc8-682249a243a2/nmstate-webhook/0.log" Nov 11 15:11:44 crc kubenswrapper[4842]: I1111 15:11:44.059350 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:11:44 crc kubenswrapper[4842]: E1111 15:11:44.060123 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.046010 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-z8m9r_12b20cff-a77f-429a-81f4-ec7e34de65e9/kube-rbac-proxy/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.233299 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-z8m9r_12b20cff-a77f-429a-81f4-ec7e34de65e9/controller/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.273607 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6fbb69bdf8-gf25w_9ea46a4b-b757-47ae-a1b1-d7e82c5980e0/frr-k8s-webhook-server/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.384451 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.573966 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.579667 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.585250 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.613890 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.835660 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.848252 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.850778 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:11:45 crc kubenswrapper[4842]: I1111 15:11:45.857403 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.083777 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.099795 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.149160 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/controller/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.172284 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.328273 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/frr-metrics/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.365530 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/kube-rbac-proxy/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.421648 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/kube-rbac-proxy-frr/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.574284 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/reloader/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.714097 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-579ffdf495-n54pn_58177de2-efee-407d-82ad-b0319114f876/manager/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.801804 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-8689778684-tflc4_f426ee4c-af44-4cc2-b9ae-8d83e3816bba/webhook-server/0.log" Nov 11 15:11:46 crc kubenswrapper[4842]: I1111 15:11:46.991031 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-vjwsf_5e9d7317-e2f0-4262-a288-adec1afe4657/kube-rbac-proxy/0.log" Nov 11 15:11:47 crc kubenswrapper[4842]: I1111 15:11:47.601432 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-vjwsf_5e9d7317-e2f0-4262-a288-adec1afe4657/speaker/0.log" Nov 11 15:11:48 crc kubenswrapper[4842]: I1111 15:11:48.076902 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/frr/0.log" Nov 11 15:11:58 crc kubenswrapper[4842]: I1111 15:11:58.060026 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:11:58 crc kubenswrapper[4842]: E1111 15:11:58.060997 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 15:11:59.240640 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/util/0.log" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 15:11:59.499846 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/pull/0.log" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 
15:11:59.527391 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/pull/0.log" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 15:11:59.536742 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/util/0.log" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 15:11:59.746328 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/extract/0.log" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 15:11:59.756926 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/util/0.log" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 15:11:59.760687 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/pull/0.log" Nov 11 15:11:59 crc kubenswrapper[4842]: I1111 15:11:59.945755 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/util/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.111037 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/util/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.116840 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/pull/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.118241 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/pull/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.333392 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/util/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.360156 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/pull/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.381751 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/extract/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.539270 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-utilities/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.721561 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-content/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.723954 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-content/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.748449 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-utilities/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.908879 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-content/0.log" Nov 11 15:12:00 crc kubenswrapper[4842]: I1111 15:12:00.915309 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-utilities/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.175299 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-utilities/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.399417 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-utilities/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.421870 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-content/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.447491 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-content/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.549645 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/registry-server/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.634405 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-content/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.694538 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-utilities/0.log" Nov 11 15:12:01 crc kubenswrapper[4842]: I1111 15:12:01.936588 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-94sst_46efe664-2d21-4657-b466-579abe4f7f02/marketplace-operator/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.115365 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-utilities/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.429401 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-utilities/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.430433 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-content/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.456926 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/registry-server/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.464285 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-content/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.610598 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-utilities/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.675810 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-content/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.834144 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/registry-server/0.log" Nov 11 15:12:02 crc kubenswrapper[4842]: I1111 15:12:02.843893 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-utilities/0.log" Nov 11 15:12:03 crc kubenswrapper[4842]: I1111 15:12:03.041534 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-content/0.log" Nov 11 15:12:03 crc kubenswrapper[4842]: I1111 15:12:03.054243 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-utilities/0.log" Nov 11 15:12:03 crc kubenswrapper[4842]: I1111 15:12:03.060320 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-content/0.log" Nov 11 15:12:03 crc kubenswrapper[4842]: I1111 15:12:03.311763 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-utilities/0.log" Nov 11 15:12:03 crc kubenswrapper[4842]: I1111 15:12:03.318899 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-content/0.log" Nov 11 15:12:04 crc kubenswrapper[4842]: I1111 15:12:04.006391 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/registry-server/0.log" Nov 11 15:12:09 crc kubenswrapper[4842]: I1111 15:12:09.059395 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:12:09 crc kubenswrapper[4842]: E1111 15:12:09.060125 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:12:14 crc kubenswrapper[4842]: I1111 15:12:14.613678 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-7c8cf85677-5lnv2_36861a8f-d7ae-47db-b504-1eb8a1694af7/prometheus-operator/0.log" Nov 11 15:12:14 crc kubenswrapper[4842]: I1111 15:12:14.765241 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6b97475496-6sfgg_dc69b653-7e30-40ed-995a-bd2ca759365c/prometheus-operator-admission-webhook/0.log" Nov 11 15:12:14 crc kubenswrapper[4842]: I1111 15:12:14.880812 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6b97475496-gxfm8_688f6c76-6b40-4937-9040-6fc178c7740d/prometheus-operator-admission-webhook/0.log" Nov 11 15:12:14 crc kubenswrapper[4842]: I1111 15:12:14.971168 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-cc5f78dfc-8ckgj_ad3cf656-3491-4507-bd22-df41ef4576d8/operator/0.log" Nov 11 15:12:15 crc kubenswrapper[4842]: I1111 15:12:15.084452 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-54bc95c9fb-hw9wl_582cbdc6-be31-4fde-904d-820ea6228929/perses-operator/0.log" Nov 11 15:12:24 crc kubenswrapper[4842]: I1111 15:12:24.059390 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:12:24 crc kubenswrapper[4842]: E1111 15:12:24.060148 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:12:39 crc kubenswrapper[4842]: I1111 15:12:39.060809 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:12:39 crc kubenswrapper[4842]: E1111 15:12:39.061911 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:12:50 crc kubenswrapper[4842]: I1111 15:12:50.065490 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:12:50 crc kubenswrapper[4842]: E1111 15:12:50.066295 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:13:02 crc kubenswrapper[4842]: I1111 15:13:02.060157 4842 scope.go:117] "RemoveContainer" 
containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:13:02 crc kubenswrapper[4842]: E1111 15:13:02.061945 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:13:14 crc kubenswrapper[4842]: I1111 15:13:14.064636 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:13:14 crc kubenswrapper[4842]: E1111 15:13:14.065374 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:13:27 crc kubenswrapper[4842]: I1111 15:13:27.060293 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:13:27 crc kubenswrapper[4842]: E1111 15:13:27.061384 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:13:40 crc kubenswrapper[4842]: I1111 15:13:40.077192 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:13:40 crc kubenswrapper[4842]: E1111 15:13:40.077958 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:13:52 crc kubenswrapper[4842]: I1111 15:13:52.093068 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:13:52 crc kubenswrapper[4842]: E1111 15:13:52.094372 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:14:06 crc kubenswrapper[4842]: I1111 15:14:06.061706 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:14:06 crc kubenswrapper[4842]: E1111 15:14:06.063605 4842 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:14:14 crc kubenswrapper[4842]: I1111 15:14:14.162690 4842 generic.go:334] "Generic (PLEG): container finished" podID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerID="647de456b4edaff633309a50709a3b2fc8be3018b0d4e7489d8014408c8c09a9" exitCode=0 Nov 11 15:14:14 crc kubenswrapper[4842]: I1111 15:14:14.162897 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-hhh5t/must-gather-z76bs" event={"ID":"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070","Type":"ContainerDied","Data":"647de456b4edaff633309a50709a3b2fc8be3018b0d4e7489d8014408c8c09a9"} Nov 11 15:14:14 crc kubenswrapper[4842]: I1111 15:14:14.164387 4842 scope.go:117] "RemoveContainer" containerID="647de456b4edaff633309a50709a3b2fc8be3018b0d4e7489d8014408c8c09a9" Nov 11 15:14:14 crc kubenswrapper[4842]: I1111 15:14:14.647885 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hhh5t_must-gather-z76bs_e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070/gather/0.log" Nov 11 15:14:20 crc kubenswrapper[4842]: I1111 15:14:20.070207 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:14:20 crc kubenswrapper[4842]: E1111 15:14:20.070829 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.098150 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-hhh5t/must-gather-z76bs"] Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.098883 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-hhh5t/must-gather-z76bs" podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerName="copy" containerID="cri-o://e38056bce75c40807c20eaa7d1392ed3f11d6d0cb2a68d67be40723f4b424e3f" gracePeriod=2 Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.114575 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-hhh5t/must-gather-z76bs"] Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.327600 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hhh5t_must-gather-z76bs_e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070/copy/0.log" Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.330249 4842 generic.go:334] "Generic (PLEG): container finished" podID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerID="e38056bce75c40807c20eaa7d1392ed3f11d6d0cb2a68d67be40723f4b424e3f" exitCode=143 Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.650761 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hhh5t_must-gather-z76bs_e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070/copy/0.log" Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.651571 4842 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.679496 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwv9c\" (UniqueName: \"kubernetes.io/projected/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-kube-api-access-jwv9c\") pod \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.679990 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-must-gather-output\") pod \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\" (UID: \"e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070\") " Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.685538 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-kube-api-access-jwv9c" (OuterVolumeSpecName: "kube-api-access-jwv9c") pod "e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" (UID: "e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070"). InnerVolumeSpecName "kube-api-access-jwv9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.782973 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwv9c\" (UniqueName: \"kubernetes.io/projected/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-kube-api-access-jwv9c\") on node \"crc\" DevicePath \"\"" Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.857045 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" (UID: "e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:14:23 crc kubenswrapper[4842]: I1111 15:14:23.883879 4842 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 11 15:14:24 crc kubenswrapper[4842]: I1111 15:14:24.071941 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" path="/var/lib/kubelet/pods/e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070/volumes" Nov 11 15:14:24 crc kubenswrapper[4842]: I1111 15:14:24.340762 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-hhh5t_must-gather-z76bs_e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070/copy/0.log" Nov 11 15:14:24 crc kubenswrapper[4842]: I1111 15:14:24.341929 4842 scope.go:117] "RemoveContainer" containerID="e38056bce75c40807c20eaa7d1392ed3f11d6d0cb2a68d67be40723f4b424e3f" Nov 11 15:14:24 crc kubenswrapper[4842]: I1111 15:14:24.342010 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-hhh5t/must-gather-z76bs" Nov 11 15:14:24 crc kubenswrapper[4842]: I1111 15:14:24.370034 4842 scope.go:117] "RemoveContainer" containerID="647de456b4edaff633309a50709a3b2fc8be3018b0d4e7489d8014408c8c09a9" Nov 11 15:14:32 crc kubenswrapper[4842]: I1111 15:14:32.059431 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:14:32 crc kubenswrapper[4842]: E1111 15:14:32.060930 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:14:45 crc kubenswrapper[4842]: I1111 15:14:45.058746 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:14:45 crc kubenswrapper[4842]: I1111 15:14:45.591596 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"d8fe6810803ceedfe2ffaf2665203a932232f9ca269054805c23df532622e6d1"} Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.168237 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8"] Nov 11 15:15:00 crc kubenswrapper[4842]: E1111 15:15:00.169283 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="extract-utilities" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169303 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="extract-utilities" Nov 11 15:15:00 crc kubenswrapper[4842]: E1111 15:15:00.169329 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="registry-server" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169338 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="registry-server" Nov 11 15:15:00 crc kubenswrapper[4842]: E1111 15:15:00.169365 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerName="copy" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169372 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerName="copy" Nov 11 15:15:00 crc kubenswrapper[4842]: E1111 15:15:00.169401 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="extract-content" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169408 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="extract-content" Nov 11 15:15:00 crc kubenswrapper[4842]: E1111 15:15:00.169425 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerName="gather" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169432 4842 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerName="gather" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169661 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerName="gather" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169687 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6a362ce-7e8d-4fd7-8de4-0d08dd5c2070" containerName="copy" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.169713 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf1e4df3-235e-47a3-a016-181cc55e89b3" containerName="registry-server" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.170643 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.178412 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.178821 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.182199 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8"] Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.321550 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbmrh\" (UniqueName: \"kubernetes.io/projected/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-kube-api-access-tbmrh\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.321601 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-secret-volume\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.321671 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-config-volume\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.423824 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbmrh\" (UniqueName: \"kubernetes.io/projected/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-kube-api-access-tbmrh\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.423872 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-secret-volume\") pod \"collect-profiles-29381235-swtw8\" (UID: 
\"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.423940 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-config-volume\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.425152 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-config-volume\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.431409 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-secret-volume\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.440837 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbmrh\" (UniqueName: \"kubernetes.io/projected/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-kube-api-access-tbmrh\") pod \"collect-profiles-29381235-swtw8\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.492427 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:00 crc kubenswrapper[4842]: I1111 15:15:00.970027 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8"] Nov 11 15:15:01 crc kubenswrapper[4842]: I1111 15:15:01.786011 4842 generic.go:334] "Generic (PLEG): container finished" podID="b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e" containerID="384201f884e5406b9efb28fe84c3b402c51bb23c9d236044f4103b3ef2bdf497" exitCode=0 Nov 11 15:15:01 crc kubenswrapper[4842]: I1111 15:15:01.786330 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" event={"ID":"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e","Type":"ContainerDied","Data":"384201f884e5406b9efb28fe84c3b402c51bb23c9d236044f4103b3ef2bdf497"} Nov 11 15:15:01 crc kubenswrapper[4842]: I1111 15:15:01.786547 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" event={"ID":"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e","Type":"ContainerStarted","Data":"1451a89e3a866c67f1b1d317b237b580bfe888204522045985f9cadaeea66a9b"} Nov 11 15:15:02 crc kubenswrapper[4842]: I1111 15:15:02.669791 4842 scope.go:117] "RemoveContainer" containerID="517cd8a5663d214df965203757d7e2a2fe4dfe89b1744e4da177ce32425b8b31" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.141337 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.178851 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-config-volume\") pod \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.179251 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbmrh\" (UniqueName: \"kubernetes.io/projected/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-kube-api-access-tbmrh\") pod \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.179292 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-secret-volume\") pod \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\" (UID: \"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e\") " Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.180148 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-config-volume" (OuterVolumeSpecName: "config-volume") pod "b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e" (UID: "b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.186332 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e" (UID: "b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.187640 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-kube-api-access-tbmrh" (OuterVolumeSpecName: "kube-api-access-tbmrh") pod "b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e" (UID: "b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e"). InnerVolumeSpecName "kube-api-access-tbmrh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.281750 4842 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.281790 4842 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.281804 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbmrh\" (UniqueName: \"kubernetes.io/projected/b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e-kube-api-access-tbmrh\") on node \"crc\" DevicePath \"\"" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.806389 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" event={"ID":"b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e","Type":"ContainerDied","Data":"1451a89e3a866c67f1b1d317b237b580bfe888204522045985f9cadaeea66a9b"} Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.806759 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1451a89e3a866c67f1b1d317b237b580bfe888204522045985f9cadaeea66a9b" Nov 11 15:15:03 crc kubenswrapper[4842]: I1111 15:15:03.806823 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29381235-swtw8" Nov 11 15:15:04 crc kubenswrapper[4842]: I1111 15:15:04.232870 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw"] Nov 11 15:15:04 crc kubenswrapper[4842]: I1111 15:15:04.241225 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29381190-8nplw"] Nov 11 15:15:06 crc kubenswrapper[4842]: I1111 15:15:06.072868 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99401aeb-e1d9-49b0-a8c9-f00396fe6b45" path="/var/lib/kubelet/pods/99401aeb-e1d9-49b0-a8c9-f00396fe6b45/volumes" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.380899 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jmq82/must-gather-44lgc"] Nov 11 15:15:07 crc kubenswrapper[4842]: E1111 15:15:07.381489 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e" containerName="collect-profiles" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.381509 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e" containerName="collect-profiles" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.381910 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="b347f5d7-e4fe-4e67-8faf-4ea2e1784b8e" containerName="collect-profiles" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.383431 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.396670 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-jmq82"/"openshift-service-ca.crt" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.397072 4842 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-jmq82"/"kube-root-ca.crt" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.405743 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-jmq82/must-gather-44lgc"] Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.480568 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3aacdbf3-901a-4354-b448-30ef13002a5a-must-gather-output\") pod \"must-gather-44lgc\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.480673 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwphp\" (UniqueName: \"kubernetes.io/projected/3aacdbf3-901a-4354-b448-30ef13002a5a-kube-api-access-lwphp\") pod \"must-gather-44lgc\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.582388 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3aacdbf3-901a-4354-b448-30ef13002a5a-must-gather-output\") pod \"must-gather-44lgc\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.582731 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwphp\" (UniqueName: \"kubernetes.io/projected/3aacdbf3-901a-4354-b448-30ef13002a5a-kube-api-access-lwphp\") pod \"must-gather-44lgc\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.583077 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3aacdbf3-901a-4354-b448-30ef13002a5a-must-gather-output\") pod \"must-gather-44lgc\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.604180 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwphp\" (UniqueName: \"kubernetes.io/projected/3aacdbf3-901a-4354-b448-30ef13002a5a-kube-api-access-lwphp\") pod \"must-gather-44lgc\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:07 crc kubenswrapper[4842]: I1111 15:15:07.705952 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:15:08 crc kubenswrapper[4842]: I1111 15:15:08.184765 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-jmq82/must-gather-44lgc"] Nov 11 15:15:08 crc kubenswrapper[4842]: I1111 15:15:08.862319 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/must-gather-44lgc" event={"ID":"3aacdbf3-901a-4354-b448-30ef13002a5a","Type":"ContainerStarted","Data":"a77e7b1118fffdf995aa5b4fc5650cd9335f69fa532b74827d447c1e0353ca22"} Nov 11 15:15:08 crc kubenswrapper[4842]: I1111 15:15:08.862378 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/must-gather-44lgc" event={"ID":"3aacdbf3-901a-4354-b448-30ef13002a5a","Type":"ContainerStarted","Data":"2e83745d26468a39f369ed9e07fa478613aa271fd12aaf6a9acf31d98daa9f57"} Nov 11 15:15:08 crc kubenswrapper[4842]: I1111 15:15:08.862393 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/must-gather-44lgc" event={"ID":"3aacdbf3-901a-4354-b448-30ef13002a5a","Type":"ContainerStarted","Data":"0945db331bba89366c74190d1b46547b919351882c4ab906b7eadc46a0aaddcc"} Nov 11 15:15:08 crc kubenswrapper[4842]: I1111 15:15:08.880187 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-jmq82/must-gather-44lgc" podStartSLOduration=1.880166232 podStartE2EDuration="1.880166232s" podCreationTimestamp="2025-11-11 15:15:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 15:15:08.876429815 +0000 UTC m=+6319.536719434" watchObservedRunningTime="2025-11-11 15:15:08.880166232 +0000 UTC m=+6319.540455851" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.168397 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jmq82/crc-debug-8q4zw"] Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.171272 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.174360 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-jmq82"/"default-dockercfg-pxzxm" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.307980 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g94f\" (UniqueName: \"kubernetes.io/projected/c7fd4aaa-b75a-4765-b340-a6613d5b6757-kube-api-access-7g94f\") pod \"crc-debug-8q4zw\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.308125 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7fd4aaa-b75a-4765-b340-a6613d5b6757-host\") pod \"crc-debug-8q4zw\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.410811 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g94f\" (UniqueName: \"kubernetes.io/projected/c7fd4aaa-b75a-4765-b340-a6613d5b6757-kube-api-access-7g94f\") pod \"crc-debug-8q4zw\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.410900 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7fd4aaa-b75a-4765-b340-a6613d5b6757-host\") pod \"crc-debug-8q4zw\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.411047 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7fd4aaa-b75a-4765-b340-a6613d5b6757-host\") pod \"crc-debug-8q4zw\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.432483 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g94f\" (UniqueName: \"kubernetes.io/projected/c7fd4aaa-b75a-4765-b340-a6613d5b6757-kube-api-access-7g94f\") pod \"crc-debug-8q4zw\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.496671 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:12 crc kubenswrapper[4842]: I1111 15:15:12.901747 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-8q4zw" event={"ID":"c7fd4aaa-b75a-4765-b340-a6613d5b6757","Type":"ContainerStarted","Data":"d9f9cfce13b6b5ff0ac622bf8bdd1e698a6187e7322f859a3f635f2a8387ed0b"} Nov 11 15:15:13 crc kubenswrapper[4842]: I1111 15:15:13.911790 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-8q4zw" event={"ID":"c7fd4aaa-b75a-4765-b340-a6613d5b6757","Type":"ContainerStarted","Data":"748e87d3923abcda9f45015a72e351f1994883c4ecaa4a1fc7457966acbe4020"} Nov 11 15:15:53 crc kubenswrapper[4842]: I1111 15:15:53.909573 4842 generic.go:334] "Generic (PLEG): container finished" podID="c7fd4aaa-b75a-4765-b340-a6613d5b6757" containerID="748e87d3923abcda9f45015a72e351f1994883c4ecaa4a1fc7457966acbe4020" exitCode=0 Nov 11 15:15:53 crc kubenswrapper[4842]: I1111 15:15:53.909650 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-8q4zw" event={"ID":"c7fd4aaa-b75a-4765-b340-a6613d5b6757","Type":"ContainerDied","Data":"748e87d3923abcda9f45015a72e351f1994883c4ecaa4a1fc7457966acbe4020"} Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.052330 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.082011 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jmq82/crc-debug-8q4zw"] Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.091944 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jmq82/crc-debug-8q4zw"] Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.187219 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7g94f\" (UniqueName: \"kubernetes.io/projected/c7fd4aaa-b75a-4765-b340-a6613d5b6757-kube-api-access-7g94f\") pod \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.187714 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7fd4aaa-b75a-4765-b340-a6613d5b6757-host\") pod \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\" (UID: \"c7fd4aaa-b75a-4765-b340-a6613d5b6757\") " Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.188431 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7fd4aaa-b75a-4765-b340-a6613d5b6757-host" (OuterVolumeSpecName: "host") pod "c7fd4aaa-b75a-4765-b340-a6613d5b6757" (UID: "c7fd4aaa-b75a-4765-b340-a6613d5b6757"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.194976 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7fd4aaa-b75a-4765-b340-a6613d5b6757-kube-api-access-7g94f" (OuterVolumeSpecName: "kube-api-access-7g94f") pod "c7fd4aaa-b75a-4765-b340-a6613d5b6757" (UID: "c7fd4aaa-b75a-4765-b340-a6613d5b6757"). InnerVolumeSpecName "kube-api-access-7g94f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.290837 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7g94f\" (UniqueName: \"kubernetes.io/projected/c7fd4aaa-b75a-4765-b340-a6613d5b6757-kube-api-access-7g94f\") on node \"crc\" DevicePath \"\"" Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.290879 4842 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7fd4aaa-b75a-4765-b340-a6613d5b6757-host\") on node \"crc\" DevicePath \"\"" Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.927837 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9f9cfce13b6b5ff0ac622bf8bdd1e698a6187e7322f859a3f635f2a8387ed0b" Nov 11 15:15:55 crc kubenswrapper[4842]: I1111 15:15:55.928206 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-8q4zw" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.077198 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7fd4aaa-b75a-4765-b340-a6613d5b6757" path="/var/lib/kubelet/pods/c7fd4aaa-b75a-4765-b340-a6613d5b6757/volumes" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.305981 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jmq82/crc-debug-78prg"] Nov 11 15:15:56 crc kubenswrapper[4842]: E1111 15:15:56.306708 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7fd4aaa-b75a-4765-b340-a6613d5b6757" containerName="container-00" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.306725 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7fd4aaa-b75a-4765-b340-a6613d5b6757" containerName="container-00" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.306935 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7fd4aaa-b75a-4765-b340-a6613d5b6757" containerName="container-00" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.308850 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.314098 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-jmq82"/"default-dockercfg-pxzxm" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.411227 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7srv2\" (UniqueName: \"kubernetes.io/projected/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-kube-api-access-7srv2\") pod \"crc-debug-78prg\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.411499 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-host\") pod \"crc-debug-78prg\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.514044 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7srv2\" (UniqueName: \"kubernetes.io/projected/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-kube-api-access-7srv2\") pod \"crc-debug-78prg\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.514131 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-host\") pod \"crc-debug-78prg\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.514318 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-host\") pod \"crc-debug-78prg\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.537994 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7srv2\" (UniqueName: \"kubernetes.io/projected/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-kube-api-access-7srv2\") pod \"crc-debug-78prg\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.626358 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.937949 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-78prg" event={"ID":"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1","Type":"ContainerStarted","Data":"3962d2036cad4d6fc1bd9e912b973e0e7b1af228b0a2991ae2975865793084b7"} Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.938470 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-78prg" event={"ID":"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1","Type":"ContainerStarted","Data":"f88574f9f658fa629ddbe89b0375c9a34a7abec53a260a855ae822ab2507e1c8"} Nov 11 15:15:56 crc kubenswrapper[4842]: I1111 15:15:56.956880 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-jmq82/crc-debug-78prg" podStartSLOduration=0.956859153 podStartE2EDuration="956.859153ms" podCreationTimestamp="2025-11-11 15:15:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-11 15:15:56.953416545 +0000 UTC m=+6367.613706164" watchObservedRunningTime="2025-11-11 15:15:56.956859153 +0000 UTC m=+6367.617148772" Nov 11 15:15:57 crc kubenswrapper[4842]: I1111 15:15:57.950254 4842 generic.go:334] "Generic (PLEG): container finished" podID="23150ec7-d1fe-4fa3-a698-87c4d9f98fe1" containerID="3962d2036cad4d6fc1bd9e912b973e0e7b1af228b0a2991ae2975865793084b7" exitCode=0 Nov 11 15:15:57 crc kubenswrapper[4842]: I1111 15:15:57.950317 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-78prg" event={"ID":"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1","Type":"ContainerDied","Data":"3962d2036cad4d6fc1bd9e912b973e0e7b1af228b0a2991ae2975865793084b7"} Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.076939 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.170366 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7srv2\" (UniqueName: \"kubernetes.io/projected/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-kube-api-access-7srv2\") pod \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.170679 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-host\") pod \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\" (UID: \"23150ec7-d1fe-4fa3-a698-87c4d9f98fe1\") " Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.170766 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-host" (OuterVolumeSpecName: "host") pod "23150ec7-d1fe-4fa3-a698-87c4d9f98fe1" (UID: "23150ec7-d1fe-4fa3-a698-87c4d9f98fe1"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.171131 4842 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-host\") on node \"crc\" DevicePath \"\"" Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.177326 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-kube-api-access-7srv2" (OuterVolumeSpecName: "kube-api-access-7srv2") pod "23150ec7-d1fe-4fa3-a698-87c4d9f98fe1" (UID: "23150ec7-d1fe-4fa3-a698-87c4d9f98fe1"). InnerVolumeSpecName "kube-api-access-7srv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.230779 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jmq82/crc-debug-78prg"] Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.247608 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jmq82/crc-debug-78prg"] Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.272612 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7srv2\" (UniqueName: \"kubernetes.io/projected/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1-kube-api-access-7srv2\") on node \"crc\" DevicePath \"\"" Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.969801 4842 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f88574f9f658fa629ddbe89b0375c9a34a7abec53a260a855ae822ab2507e1c8" Nov 11 15:15:59 crc kubenswrapper[4842]: I1111 15:15:59.969892 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-78prg" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.069411 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23150ec7-d1fe-4fa3-a698-87c4d9f98fe1" path="/var/lib/kubelet/pods/23150ec7-d1fe-4fa3-a698-87c4d9f98fe1/volumes" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.406162 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jmq82/crc-debug-7lt48"] Nov 11 15:16:00 crc kubenswrapper[4842]: E1111 15:16:00.406832 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23150ec7-d1fe-4fa3-a698-87c4d9f98fe1" containerName="container-00" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.406843 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="23150ec7-d1fe-4fa3-a698-87c4d9f98fe1" containerName="container-00" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.407029 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="23150ec7-d1fe-4fa3-a698-87c4d9f98fe1" containerName="container-00" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.407675 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.409894 4842 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-jmq82"/"default-dockercfg-pxzxm" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.495009 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wwgm\" (UniqueName: \"kubernetes.io/projected/0f529955-0fd0-4ee6-9d6e-435b7528fd28-kube-api-access-5wwgm\") pod \"crc-debug-7lt48\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.495072 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0f529955-0fd0-4ee6-9d6e-435b7528fd28-host\") pod \"crc-debug-7lt48\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.596836 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wwgm\" (UniqueName: \"kubernetes.io/projected/0f529955-0fd0-4ee6-9d6e-435b7528fd28-kube-api-access-5wwgm\") pod \"crc-debug-7lt48\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.596945 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0f529955-0fd0-4ee6-9d6e-435b7528fd28-host\") pod \"crc-debug-7lt48\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.597322 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0f529955-0fd0-4ee6-9d6e-435b7528fd28-host\") pod \"crc-debug-7lt48\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.619348 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wwgm\" (UniqueName: \"kubernetes.io/projected/0f529955-0fd0-4ee6-9d6e-435b7528fd28-kube-api-access-5wwgm\") pod \"crc-debug-7lt48\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.725262 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:00 crc kubenswrapper[4842]: W1111 15:16:00.755405 4842 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f529955_0fd0_4ee6_9d6e_435b7528fd28.slice/crio-694d76f6aabf7432863633eac5d83d7eba8e038540c354aee8128417c5ab1c0a WatchSource:0}: Error finding container 694d76f6aabf7432863633eac5d83d7eba8e038540c354aee8128417c5ab1c0a: Status 404 returned error can't find the container with id 694d76f6aabf7432863633eac5d83d7eba8e038540c354aee8128417c5ab1c0a Nov 11 15:16:00 crc kubenswrapper[4842]: I1111 15:16:00.980287 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-7lt48" event={"ID":"0f529955-0fd0-4ee6-9d6e-435b7528fd28","Type":"ContainerStarted","Data":"694d76f6aabf7432863633eac5d83d7eba8e038540c354aee8128417c5ab1c0a"} Nov 11 15:16:01 crc kubenswrapper[4842]: I1111 15:16:01.991914 4842 generic.go:334] "Generic (PLEG): container finished" podID="0f529955-0fd0-4ee6-9d6e-435b7528fd28" containerID="654606db4da44fa70ec895c054ce5aec3d4a8142925a20d862988f8dd9fde6e9" exitCode=0 Nov 11 15:16:01 crc kubenswrapper[4842]: I1111 15:16:01.991977 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/crc-debug-7lt48" event={"ID":"0f529955-0fd0-4ee6-9d6e-435b7528fd28","Type":"ContainerDied","Data":"654606db4da44fa70ec895c054ce5aec3d4a8142925a20d862988f8dd9fde6e9"} Nov 11 15:16:02 crc kubenswrapper[4842]: I1111 15:16:02.041373 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jmq82/crc-debug-7lt48"] Nov 11 15:16:02 crc kubenswrapper[4842]: I1111 15:16:02.048018 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jmq82/crc-debug-7lt48"] Nov 11 15:16:02 crc kubenswrapper[4842]: I1111 15:16:02.747769 4842 scope.go:117] "RemoveContainer" containerID="105e727b8bae5c32aabfc19d54377049f10ae20cc330e4998c28480ad96eb4f9" Nov 11 15:16:03 crc kubenswrapper[4842]: I1111 15:16:03.131141 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:03 crc kubenswrapper[4842]: I1111 15:16:03.273419 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wwgm\" (UniqueName: \"kubernetes.io/projected/0f529955-0fd0-4ee6-9d6e-435b7528fd28-kube-api-access-5wwgm\") pod \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " Nov 11 15:16:03 crc kubenswrapper[4842]: I1111 15:16:03.273723 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0f529955-0fd0-4ee6-9d6e-435b7528fd28-host\") pod \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\" (UID: \"0f529955-0fd0-4ee6-9d6e-435b7528fd28\") " Nov 11 15:16:03 crc kubenswrapper[4842]: I1111 15:16:03.273802 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0f529955-0fd0-4ee6-9d6e-435b7528fd28-host" (OuterVolumeSpecName: "host") pod "0f529955-0fd0-4ee6-9d6e-435b7528fd28" (UID: "0f529955-0fd0-4ee6-9d6e-435b7528fd28"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 11 15:16:03 crc kubenswrapper[4842]: I1111 15:16:03.274480 4842 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0f529955-0fd0-4ee6-9d6e-435b7528fd28-host\") on node \"crc\" DevicePath \"\"" Nov 11 15:16:03 crc kubenswrapper[4842]: I1111 15:16:03.300295 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f529955-0fd0-4ee6-9d6e-435b7528fd28-kube-api-access-5wwgm" (OuterVolumeSpecName: "kube-api-access-5wwgm") pod "0f529955-0fd0-4ee6-9d6e-435b7528fd28" (UID: "0f529955-0fd0-4ee6-9d6e-435b7528fd28"). InnerVolumeSpecName "kube-api-access-5wwgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:16:03 crc kubenswrapper[4842]: I1111 15:16:03.376146 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wwgm\" (UniqueName: \"kubernetes.io/projected/0f529955-0fd0-4ee6-9d6e-435b7528fd28-kube-api-access-5wwgm\") on node \"crc\" DevicePath \"\"" Nov 11 15:16:04 crc kubenswrapper[4842]: I1111 15:16:04.012403 4842 scope.go:117] "RemoveContainer" containerID="654606db4da44fa70ec895c054ce5aec3d4a8142925a20d862988f8dd9fde6e9" Nov 11 15:16:04 crc kubenswrapper[4842]: I1111 15:16:04.012454 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/crc-debug-7lt48" Nov 11 15:16:04 crc kubenswrapper[4842]: I1111 15:16:04.072578 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f529955-0fd0-4ee6-9d6e-435b7528fd28" path="/var/lib/kubelet/pods/0f529955-0fd0-4ee6-9d6e-435b7528fd28/volumes" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.203186 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-95f8fc9b8-pc2pp_b13026a5-f118-43d9-b363-84f9ae14379c/barbican-api/0.log" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.295223 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-95f8fc9b8-pc2pp_b13026a5-f118-43d9-b363-84f9ae14379c/barbican-api-log/0.log" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.475374 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5dfc9c458b-t66x8_e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef/barbican-keystone-listener/0.log" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.533459 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5dfc9c458b-t66x8_e39b27ef-b1b6-4624-9e43-ac4f1d3fa0ef/barbican-keystone-listener-log/0.log" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.639037 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b56b7d577-bj4cr_c1fe7c75-ea0d-41ed-b79a-7ecce3779047/barbican-worker/0.log" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.663439 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b56b7d577-bj4cr_c1fe7c75-ea0d-41ed-b79a-7ecce3779047/barbican-worker-log/0.log" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.819722 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-pbqj2_cbeea580-daef-4e97-898b-c194a52a4e97/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:37 crc kubenswrapper[4842]: I1111 15:16:37.961126 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/ceilometer-central-agent/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.037036 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/ceilometer-notification-agent/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.097022 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/proxy-httpd/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.172269 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_534497bc-bc86-410d-88c9-ef65d8e2463c/sg-core/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.357040 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_317f46b7-2e01-425e-8d9a-7df1c63a0d34/cinder-api-log/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.624568 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_07ee075d-6090-4f91-9908-223be5beff86/probe/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.770901 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_07ee075d-6090-4f91-9908-223be5beff86/cinder-backup/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.897937 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5eec4bcc-3000-4c55-99ea-0bee19c6fa86/cinder-scheduler/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.907094 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5eec4bcc-3000-4c55-99ea-0bee19c6fa86/probe/0.log" Nov 11 15:16:38 crc kubenswrapper[4842]: I1111 15:16:38.976513 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_317f46b7-2e01-425e-8d9a-7df1c63a0d34/cinder-api/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.196360 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_3c06a622-086f-4df5-beaa-67d62802c249/probe/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.363997 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-0_3c06a622-086f-4df5-beaa-67d62802c249/cinder-volume/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.480663 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_51936d85-49d4-4413-b8f0-0c582381a663/cinder-volume/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.509375 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-nfs-2-0_51936d85-49d4-4413-b8f0-0c582381a663/probe/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.590004 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-79s62_53ccc159-9d7f-41b0-8f5d-bc9521be7f1a/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.736290 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-btvfg_f94c8fa3-0b93-4cf3-9aae-9feb9bc79273/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.826078 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-846db6f4bf-jr7k5_e6ec519f-08c0-4b5d-a698-3ecb9933613f/init/0.log" Nov 11 15:16:39 crc kubenswrapper[4842]: I1111 15:16:39.934018 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-846db6f4bf-jr7k5_e6ec519f-08c0-4b5d-a698-3ecb9933613f/init/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.028119 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-wfksg_77d635ff-fb62-482a-b81e-18a8e371d404/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.161123 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-846db6f4bf-jr7k5_e6ec519f-08c0-4b5d-a698-3ecb9933613f/dnsmasq-dns/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.249636 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_069ae2bc-eee5-4b02-9dd7-602303027ee4/glance-httpd/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.283876 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_069ae2bc-eee5-4b02-9dd7-602303027ee4/glance-log/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.394337 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_762a07fe-46d0-4852-bfef-6ed8007dcd63/glance-httpd/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.468267 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_762a07fe-46d0-4852-bfef-6ed8007dcd63/glance-log/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.752222 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7ccc6f5856-tt6gw_dde43d8b-9a6f-4506-9285-0606a6e04361/horizon/0.log" Nov 11 15:16:40 crc kubenswrapper[4842]: I1111 15:16:40.752321 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-sm5cs_7252c6dc-fc9e-44ee-bea7-1b61760f4f8e/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:41 crc kubenswrapper[4842]: I1111 15:16:41.262402 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7ccc6f5856-tt6gw_dde43d8b-9a6f-4506-9285-0606a6e04361/horizon-log/0.log" Nov 11 15:16:41 crc kubenswrapper[4842]: I1111 15:16:41.305385 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-8drsm_a6455983-1479-4b83-a9ba-2aef71382fc7/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:41 crc kubenswrapper[4842]: I1111 15:16:41.467464 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29381161-b8tsl_4401a195-a0c5-46b6-9b52-8e83c88ef55d/keystone-cron/0.log" Nov 11 15:16:41 crc kubenswrapper[4842]: I1111 15:16:41.562203 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29381221-6dgbk_5b5757ad-60d5-4159-a919-b2b784bd5072/keystone-cron/0.log" Nov 11 15:16:41 crc kubenswrapper[4842]: I1111 15:16:41.754353 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7c6ccd957-kmlcp_e5ae2f35-b0d7-480b-8f4d-cda875e63ec2/keystone-api/0.log" Nov 11 15:16:41 crc kubenswrapper[4842]: I1111 15:16:41.758314 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_kube-state-metrics-0_5f2c2abf-63ce-49fa-a178-57088955a295/kube-state-metrics/0.log" Nov 11 15:16:41 crc kubenswrapper[4842]: I1111 15:16:41.803257 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-4j6bx_28de897b-72a6-4d7b-b7e7-e205a32fe32d/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:42 crc kubenswrapper[4842]: I1111 15:16:42.300133 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-5fgcp_576c96ec-4ad4-4eee-ae3b-10b4b4aa5524/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:42 crc kubenswrapper[4842]: I1111 15:16:42.408341 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6cbfd54f69-xg8r8_18c3a0a5-32fd-44f4-8d0e-beb556aab16b/neutron-httpd/0.log" Nov 11 15:16:42 crc kubenswrapper[4842]: I1111 15:16:42.460607 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6cbfd54f69-xg8r8_18c3a0a5-32fd-44f4-8d0e-beb556aab16b/neutron-api/0.log" Nov 11 15:16:43 crc kubenswrapper[4842]: I1111 15:16:43.163127 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_c743e88d-2ca8-45a8-9e26-7106975b5be3/nova-cell0-conductor-conductor/0.log" Nov 11 15:16:43 crc kubenswrapper[4842]: I1111 15:16:43.446362 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_7d7cbe2b-2615-4ee7-882a-9aebdbd4fa49/nova-cell1-conductor-conductor/0.log" Nov 11 15:16:43 crc kubenswrapper[4842]: I1111 15:16:43.845431 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_96d33b5d-2ce7-480d-8e48-1badc4624a2f/nova-cell1-novncproxy-novncproxy/0.log" Nov 11 15:16:44 crc kubenswrapper[4842]: I1111 15:16:44.019602 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-65pxq_908c0c25-452c-4fba-9fbd-d76fa35416af/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:44 crc kubenswrapper[4842]: I1111 15:16:44.277929 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d993b82e-e35e-44d7-9e76-1bae92e42c25/nova-metadata-log/0.log" Nov 11 15:16:44 crc kubenswrapper[4842]: I1111 15:16:44.409853 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_23d198b8-bfde-44cc-909f-593c5a1968a3/nova-api-log/0.log" Nov 11 15:16:44 crc kubenswrapper[4842]: I1111 15:16:44.809180 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_23d198b8-bfde-44cc-909f-593c5a1968a3/nova-api-api/0.log" Nov 11 15:16:44 crc kubenswrapper[4842]: I1111 15:16:44.974110 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6225291-4a01-43af-ba67-f5281c2bd436/mysql-bootstrap/0.log" Nov 11 15:16:44 crc kubenswrapper[4842]: I1111 15:16:44.991992 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_c57355b5-f8b3-463a-933b-fedf7d2886a6/nova-scheduler-scheduler/0.log" Nov 11 15:16:45 crc kubenswrapper[4842]: I1111 15:16:45.288272 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a6225291-4a01-43af-ba67-f5281c2bd436/galera/0.log" Nov 11 15:16:45 crc kubenswrapper[4842]: I1111 15:16:45.296771 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-cell1-galera-0_a6225291-4a01-43af-ba67-f5281c2bd436/mysql-bootstrap/0.log" Nov 11 15:16:45 crc kubenswrapper[4842]: I1111 15:16:45.545614 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca/mysql-bootstrap/0.log" Nov 11 15:16:45 crc kubenswrapper[4842]: I1111 15:16:45.744964 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca/mysql-bootstrap/0.log" Nov 11 15:16:45 crc kubenswrapper[4842]: I1111 15:16:45.753379 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c6cfb8ab-08d0-48f5-8ffc-2a2d33107eca/galera/0.log" Nov 11 15:16:45 crc kubenswrapper[4842]: I1111 15:16:45.954820 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_414a02e6-eebe-4988-99fd-1bf1651fa858/openstackclient/0.log" Nov 11 15:16:46 crc kubenswrapper[4842]: I1111 15:16:46.026001 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-f7sn5_6ae937ce-ab8b-471f-b809-821ca6f23ecd/ovn-controller/0.log" Nov 11 15:16:46 crc kubenswrapper[4842]: I1111 15:16:46.296726 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-rlvf6_df77e52b-398e-454a-bbf5-0bac66c17380/openstack-network-exporter/0.log" Nov 11 15:16:46 crc kubenswrapper[4842]: I1111 15:16:46.494339 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovsdb-server-init/0.log" Nov 11 15:16:46 crc kubenswrapper[4842]: I1111 15:16:46.658300 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovsdb-server-init/0.log" Nov 11 15:16:46 crc kubenswrapper[4842]: I1111 15:16:46.708681 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovsdb-server/0.log" Nov 11 15:16:46 crc kubenswrapper[4842]: I1111 15:16:46.757157 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d993b82e-e35e-44d7-9e76-1bae92e42c25/nova-metadata-metadata/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.023557 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-sqg56_92fb25d5-d93a-4932-8d37-94ca7302c774/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.075069 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cc6nz_e04e103c-eb86-4e27-b5ac-0d4faf32d1f5/ovs-vswitchd/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.116384 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e0df8170-d8dd-4ad1-9a30-d60e06fa07f7/openstack-network-exporter/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.264233 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_9f712abc-0d24-4fc2-a103-c102a8833466/openstack-network-exporter/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.295770 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e0df8170-d8dd-4ad1-9a30-d60e06fa07f7/ovn-northd/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.375732 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_9f712abc-0d24-4fc2-a103-c102a8833466/ovsdbserver-nb/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.510475 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85/openstack-network-exporter/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.537680 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_435a5bca-7dc1-44f3-8b1f-8ecee7fd4e85/ovsdbserver-sb/0.log" Nov 11 15:16:47 crc kubenswrapper[4842]: I1111 15:16:47.939397 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/init-config-reloader/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.027450 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-94c6f6d9b-ns8g4_2494da24-b74f-4317-8bf2-80e0335c5648/placement-api/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.114678 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-94c6f6d9b-ns8g4_2494da24-b74f-4317-8bf2-80e0335c5648/placement-log/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.173482 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/init-config-reloader/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.225013 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/config-reloader/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.260859 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/prometheus/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.367963 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_70812af6-a8c3-4e0c-93b6-017fd4117173/thanos-sidecar/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.429792 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_426f2645-ec57-40e9-b41f-3d1b38a42d04/setup-container/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.654703 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_426f2645-ec57-40e9-b41f-3d1b38a42d04/setup-container/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.732776 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_121e4ffa-c7c1-40ef-a668-500b2cc8fba6/setup-container/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.742347 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_426f2645-ec57-40e9-b41f-3d1b38a42d04/rabbitmq/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.910587 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_121e4ffa-c7c1-40ef-a668-500b2cc8fba6/setup-container/0.log" Nov 11 15:16:48 crc kubenswrapper[4842]: I1111 15:16:48.996498 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_121e4ffa-c7c1-40ef-a668-500b2cc8fba6/rabbitmq/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.024093 4842 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_rabbitmq-server-0_d18de6e6-d3e2-41fd-83df-a0d75a1fd978/setup-container/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.254914 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d18de6e6-d3e2-41fd-83df-a0d75a1fd978/rabbitmq/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.258193 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-bfrqh_c87f657c-bfbc-4d66-9a66-f751fa8ac3ac/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.309245 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d18de6e6-d3e2-41fd-83df-a0d75a1fd978/setup-container/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.485949 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-wz4g4_c8e3c3e6-73ba-490d-b0f5-c99a557f7129/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.561657 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-qgcb2_50a93092-7567-4563-a8cc-9393aaf10eae/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.719238 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-h2rqw_3250570c-99a1-4981-a05a-4ba474ed0ab2/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:49 crc kubenswrapper[4842]: I1111 15:16:49.741309 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-tmvxh_448641d9-e39c-4fe4-bc02-cbf87ea74789/ssh-known-hosts-edpm-deployment/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.025516 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6fb8686df5-bdtrx_65b2ac4c-d60a-4926-a3b1-88018ce9c369/proxy-server/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.164782 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-z5kzq_8b12adcf-9678-4493-8035-061dcdf98b6e/swift-ring-rebalance/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.201869 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6fb8686df5-bdtrx_65b2ac4c-d60a-4926-a3b1-88018ce9c369/proxy-httpd/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.303147 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-auditor/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.377400 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-reaper/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.503811 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-replicator/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.530273 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/account-server/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.569333 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-auditor/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.638730 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-replicator/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.700550 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-server/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.790718 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/container-updater/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.824359 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-auditor/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.904826 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-expirer/0.log" Nov 11 15:16:50 crc kubenswrapper[4842]: I1111 15:16:50.934008 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-replicator/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.037674 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-server/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.061891 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/object-updater/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.138367 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/rsync/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.169273 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2a1e5076-485c-4759-ba37-33e161741f74/swift-recon-cron/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.291507 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-mwg5c_c203745d-d249-4515-ac25-d99b78d65d2e/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.442907 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_42e4762f-5636-4ea5-914b-142ccc708e6d/tempest-tests-tempest-tests-runner/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.655466 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_9103e703-2527-4102-948b-a6f7e05b2e5a/test-operator-logs-container/0.log" Nov 11 15:16:51 crc kubenswrapper[4842]: I1111 15:16:51.666583 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-bs62h_b96d38f2-b032-495a-8296-72c06458c86f/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 11 15:16:52 crc kubenswrapper[4842]: I1111 15:16:52.639180 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_9f38d3ba-5c82-4503-a865-35767c1f1147/watcher-applier/0.log" Nov 11 15:16:53 crc 
kubenswrapper[4842]: I1111 15:16:53.120682 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_bad883a9-7045-46c2-8358-aa3a6d8f7f01/watcher-api-log/0.log" Nov 11 15:16:56 crc kubenswrapper[4842]: I1111 15:16:56.005542 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_3bf4b336-4ea3-4fe5-ad16-5a6047338cf3/watcher-decision-engine/0.log" Nov 11 15:16:57 crc kubenswrapper[4842]: I1111 15:16:57.364487 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_bad883a9-7045-46c2-8358-aa3a6d8f7f01/watcher-api/0.log" Nov 11 15:17:05 crc kubenswrapper[4842]: I1111 15:17:05.134497 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_fd2b40ae-3270-4f5b-9700-026adaf919ca/memcached/0.log" Nov 11 15:17:14 crc kubenswrapper[4842]: I1111 15:17:14.960575 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:17:14 crc kubenswrapper[4842]: I1111 15:17:14.961088 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:17:19 crc kubenswrapper[4842]: I1111 15:17:19.671672 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6999776966-pnbdh_b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e/kube-rbac-proxy/0.log" Nov 11 15:17:19 crc kubenswrapper[4842]: I1111 15:17:19.756454 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6999776966-pnbdh_b01ab86b-b1f6-4f5e-ba91-06f6bb652d4e/manager/0.log" Nov 11 15:17:19 crc kubenswrapper[4842]: I1111 15:17:19.880598 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8dffd86b7-rldzd_b7462081-0162-4bd6-96fe-23a8c29df0db/kube-rbac-proxy/0.log" Nov 11 15:17:19 crc kubenswrapper[4842]: I1111 15:17:19.990655 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8dffd86b7-rldzd_b7462081-0162-4bd6-96fe-23a8c29df0db/manager/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.082540 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-67455b77fb-8g2hw_c0dc7222-a511-4010-b7ad-f1d4716958f8/kube-rbac-proxy/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.150966 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-67455b77fb-8g2hw_c0dc7222-a511-4010-b7ad-f1d4716958f8/manager/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.221194 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/util/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.382652 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/pull/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.406203 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/util/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.408422 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/pull/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.550437 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/util/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.564123 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/pull/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.564668 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_eb32d7f4ca93b1940e0297b631b2a801b172217c3fc327a542ba9cdba4chnxm_17e735d4-82c6-4083-bd01-382013995cc2/extract/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.706761 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-774b65955b-mtvmd_7c7223fc-d7fe-416d-8c4f-872f399ad3f3/kube-rbac-proxy/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.793300 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-774b65955b-mtvmd_7c7223fc-d7fe-416d-8c4f-872f399ad3f3/manager/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.873135 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6b57d4f86f-pkdl8_00cc5552-7130-40ca-ab43-b6525d3199f4/kube-rbac-proxy/0.log" Nov 11 15:17:20 crc kubenswrapper[4842]: I1111 15:17:20.900141 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-6b57d4f86f-pkdl8_00cc5552-7130-40ca-ab43-b6525d3199f4/manager/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.031446 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d445c6d8b-bqk7b_d200f269-63a1-4cee-820f-1b42538f1fb9/kube-rbac-proxy/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.071051 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d445c6d8b-bqk7b_d200f269-63a1-4cee-820f-1b42538f1fb9/manager/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.264939 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-64cbcd8bcf-b9q8b_6c3ed8b6-85b5-402b-994a-ca068cc5a357/kube-rbac-proxy/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.367909 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-8444f8f688-gl575_8976057b-f908-4295-93a2-0bd3bb1441da/kube-rbac-proxy/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 
15:17:21.463414 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-64cbcd8bcf-b9q8b_6c3ed8b6-85b5-402b-994a-ca068cc5a357/manager/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.497772 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-8444f8f688-gl575_8976057b-f908-4295-93a2-0bd3bb1441da/manager/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.552180 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5c68d88c57-2k92j_d90f01be-5138-44bc-8330-0e8ee3914ba8/kube-rbac-proxy/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.723936 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5c68d88c57-2k92j_d90f01be-5138-44bc-8330-0e8ee3914ba8/manager/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.758154 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67c5b7495b-2sfch_fae3f9e9-7308-454c-80e2-c836cfa04a44/kube-rbac-proxy/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.772702 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67c5b7495b-2sfch_fae3f9e9-7308-454c-80e2-c836cfa04a44/manager/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.898290 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-697bcb486c-xcdsm_20a746aa-153e-4ad3-afb7-e5d771927b18/kube-rbac-proxy/0.log" Nov 11 15:17:21 crc kubenswrapper[4842]: I1111 15:17:21.990173 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-697bcb486c-xcdsm_20a746aa-153e-4ad3-afb7-e5d771927b18/manager/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.089530 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-fdd8575d6-rqzfb_d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5/kube-rbac-proxy/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.163450 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-fdd8575d6-rqzfb_d976acf1-8ebd-4a2e-9f58-c03fcc6c5bd5/manager/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.247216 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-8588b44bb6-2m4gd_828ba013-e0fe-452c-a8ae-2dbb8e9436b4/kube-rbac-proxy/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.384021 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-8588b44bb6-2m4gd_828ba013-e0fe-452c-a8ae-2dbb8e9436b4/manager/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.421852 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-97dc668d8-scbz4_8458ea94-f568-498e-9f67-f1a31cdb2fdf/kube-rbac-proxy/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.424402 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-97dc668d8-scbz4_8458ea94-f568-498e-9f67-f1a31cdb2fdf/manager/0.log" Nov 11 15:17:22 crc 
kubenswrapper[4842]: I1111 15:17:22.570754 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-54948dd897l2jn8_3f8fa56e-98d6-4af9-9ea6-13917e0c5aee/kube-rbac-proxy/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.622271 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-54948dd897l2jn8_3f8fa56e-98d6-4af9-9ea6-13917e0c5aee/manager/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.745238 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56b55c68d5-v2ffq_74d23e18-019f-4f09-9011-8d495ff3c70b/kube-rbac-proxy/0.log" Nov 11 15:17:22 crc kubenswrapper[4842]: I1111 15:17:22.888537 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77d445568-tvtkj_e928b7e0-ef80-4622-9bec-93c14a6c734d/kube-rbac-proxy/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.127958 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-clx45_249af774-2a96-4177-bede-702ebe9025c9/registry-server/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.195644 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77d445568-tvtkj_e928b7e0-ef80-4622-9bec-93c14a6c734d/operator/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.365602 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6559d764b4-ntbtw_85789962-b64f-422a-a2b4-4f98a786be81/kube-rbac-proxy/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.437344 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6559d764b4-ntbtw_85789962-b64f-422a-a2b4-4f98a786be81/manager/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.556977 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-776bc4cb49-lh5x5_0b0bd151-ad85-46db-8425-fe640a956d01/kube-rbac-proxy/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.627816 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-776bc4cb49-lh5x5_0b0bd151-ad85-46db-8425-fe640a956d01/manager/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.757972 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-phmkh_dcb386ac-da43-4629-a57f-1d272c31bd46/operator/0.log" Nov 11 15:17:23 crc kubenswrapper[4842]: I1111 15:17:23.873886 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-57cf4f487c-8hwbs_79eedf2f-0af7-46fa-aa0e-7d965ee918d3/kube-rbac-proxy/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.037710 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56b55c68d5-v2ffq_74d23e18-019f-4f09-9011-8d495ff3c70b/manager/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.095086 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5cc784f744-5p2r8_f179c06b-83ea-4ece-b789-7bb5d75e05d5/kube-rbac-proxy/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.096327 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-57cf4f487c-8hwbs_79eedf2f-0af7-46fa-aa0e-7d965ee918d3/manager/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.257245 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-66ff8cb84f-nqlnk_0f38813f-c55c-43d3-94bd-3ee9152e3db3/kube-rbac-proxy/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.328614 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5cc784f744-5p2r8_f179c06b-83ea-4ece-b789-7bb5d75e05d5/manager/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.353010 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-66ff8cb84f-nqlnk_0f38813f-c55c-43d3-94bd-3ee9152e3db3/manager/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.443477 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6c495746fb-mgjxt_cfcfc6be-d566-4ba4-87e9-6157d249adc0/kube-rbac-proxy/0.log" Nov 11 15:17:24 crc kubenswrapper[4842]: I1111 15:17:24.534552 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6c495746fb-mgjxt_cfcfc6be-d566-4ba4-87e9-6157d249adc0/manager/0.log" Nov 11 15:17:39 crc kubenswrapper[4842]: I1111 15:17:39.621002 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-6tqv7_e4ac96a0-0de8-47d1-b101-4054af9c7fe0/control-plane-machine-set-operator/0.log" Nov 11 15:17:39 crc kubenswrapper[4842]: I1111 15:17:39.802553 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-97v95_0c277b36-785e-4e8f-828e-17e36dac70be/kube-rbac-proxy/0.log" Nov 11 15:17:39 crc kubenswrapper[4842]: I1111 15:17:39.813793 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-97v95_0c277b36-785e-4e8f-828e-17e36dac70be/machine-api-operator/0.log" Nov 11 15:17:44 crc kubenswrapper[4842]: I1111 15:17:44.960718 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:17:44 crc kubenswrapper[4842]: I1111 15:17:44.961175 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:17:51 crc kubenswrapper[4842]: I1111 15:17:51.538304 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-fwg9f_14c43b93-a309-434e-8379-c48dca27130f/cert-manager-controller/0.log" Nov 11 15:17:51 crc kubenswrapper[4842]: I1111 15:17:51.721879 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-djwf2_9548e4cc-cf32-4973-bb29-5525fee6d3e8/cert-manager-webhook/0.log" Nov 11 15:17:51 crc kubenswrapper[4842]: I1111 15:17:51.769648 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-5ks7v_32eb5634-bee2-4ae4-89ad-cad4e90a79d1/cert-manager-cainjector/0.log" Nov 11 15:18:03 crc kubenswrapper[4842]: I1111 15:18:03.350895 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5859445d84-62bzc_d3481847-dbe0-4b95-ba37-92efb99cbc58/nmstate-console-plugin/0.log" Nov 11 15:18:03 crc kubenswrapper[4842]: I1111 15:18:03.520012 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-4rnrz_d3a048b3-b011-4646-a47f-c51fa6177169/nmstate-handler/0.log" Nov 11 15:18:03 crc kubenswrapper[4842]: I1111 15:18:03.528636 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z9xns_836465ab-91a1-4433-9182-be504f2d4b33/kube-rbac-proxy/0.log" Nov 11 15:18:03 crc kubenswrapper[4842]: I1111 15:18:03.597334 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z9xns_836465ab-91a1-4433-9182-be504f2d4b33/nmstate-metrics/0.log" Nov 11 15:18:03 crc kubenswrapper[4842]: I1111 15:18:03.743516 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-65474b4696-mbppg_473113a5-da7d-4f9f-b6c9-865ed25b03fe/nmstate-operator/0.log" Nov 11 15:18:03 crc kubenswrapper[4842]: I1111 15:18:03.799682 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-jzn5j_34787e6f-0d9b-41f6-8cc8-682249a243a2/nmstate-webhook/0.log" Nov 11 15:18:14 crc kubenswrapper[4842]: I1111 15:18:14.961007 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:18:14 crc kubenswrapper[4842]: I1111 15:18:14.961709 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:18:14 crc kubenswrapper[4842]: I1111 15:18:14.961763 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 15:18:14 crc kubenswrapper[4842]: I1111 15:18:14.962890 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d8fe6810803ceedfe2ffaf2665203a932232f9ca269054805c23df532622e6d1"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 15:18:14 crc kubenswrapper[4842]: I1111 15:18:14.962952 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" 
containerID="cri-o://d8fe6810803ceedfe2ffaf2665203a932232f9ca269054805c23df532622e6d1" gracePeriod=600 Nov 11 15:18:15 crc kubenswrapper[4842]: I1111 15:18:15.237202 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="d8fe6810803ceedfe2ffaf2665203a932232f9ca269054805c23df532622e6d1" exitCode=0 Nov 11 15:18:15 crc kubenswrapper[4842]: I1111 15:18:15.237270 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"d8fe6810803ceedfe2ffaf2665203a932232f9ca269054805c23df532622e6d1"} Nov 11 15:18:15 crc kubenswrapper[4842]: I1111 15:18:15.237549 4842 scope.go:117] "RemoveContainer" containerID="2721418d52cdf1dde96139bf697dc034eb4ee91ea92829ebbfa0c3bc46c9e26c" Nov 11 15:18:16 crc kubenswrapper[4842]: I1111 15:18:16.247668 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerStarted","Data":"0266b7209db5a0aa8266f36f3986eba5be4a04fe36df4e571e258eb56064302e"} Nov 11 15:18:16 crc kubenswrapper[4842]: I1111 15:18:16.728729 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-z8m9r_12b20cff-a77f-429a-81f4-ec7e34de65e9/kube-rbac-proxy/0.log" Nov 11 15:18:16 crc kubenswrapper[4842]: I1111 15:18:16.848257 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-z8m9r_12b20cff-a77f-429a-81f4-ec7e34de65e9/controller/0.log" Nov 11 15:18:16 crc kubenswrapper[4842]: I1111 15:18:16.968816 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6fbb69bdf8-gf25w_9ea46a4b-b757-47ae-a1b1-d7e82c5980e0/frr-k8s-webhook-server/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.076463 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.235443 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.281925 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.298110 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.331599 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.468608 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.522957 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.527426 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.543323 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.701860 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-reloader/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.720810 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-frr-files/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.756133 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/cp-metrics/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.765675 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/controller/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.910671 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/kube-rbac-proxy/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.946629 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/frr-metrics/0.log" Nov 11 15:18:17 crc kubenswrapper[4842]: I1111 15:18:17.974917 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/kube-rbac-proxy-frr/0.log" Nov 11 15:18:18 crc kubenswrapper[4842]: I1111 15:18:18.092598 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/reloader/0.log" Nov 11 15:18:18 crc kubenswrapper[4842]: I1111 15:18:18.184263 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-579ffdf495-n54pn_58177de2-efee-407d-82ad-b0319114f876/manager/0.log" Nov 11 15:18:18 crc kubenswrapper[4842]: I1111 15:18:18.390158 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-8689778684-tflc4_f426ee4c-af44-4cc2-b9ae-8d83e3816bba/webhook-server/0.log" Nov 11 15:18:18 crc kubenswrapper[4842]: I1111 15:18:18.532705 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-vjwsf_5e9d7317-e2f0-4262-a288-adec1afe4657/kube-rbac-proxy/0.log" Nov 11 15:18:19 crc kubenswrapper[4842]: I1111 15:18:19.102181 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-vjwsf_5e9d7317-e2f0-4262-a288-adec1afe4657/speaker/0.log" Nov 11 15:18:19 crc kubenswrapper[4842]: I1111 15:18:19.643797 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-x8q5h_b09ac28a-d43e-4609-b418-5e7f1d7e22a9/frr/0.log" Nov 11 15:18:30 crc kubenswrapper[4842]: I1111 15:18:30.897396 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/util/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.150274 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/util/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.151018 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/pull/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.208542 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/pull/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.336944 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/extract/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.344160 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/pull/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.385305 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7994a39a9837f7fc2f9e7a84d68062bde361bde6a099852dbc2fe4cc77xms89_ce76dfe0-34fe-4ff7-8a17-d99f59a16522/util/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.502142 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/util/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.690327 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/util/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.704626 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/pull/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.710649 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/pull/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.851338 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/util/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.866476 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/pull/0.log" Nov 11 15:18:31 crc kubenswrapper[4842]: I1111 15:18:31.876913 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7c9511186b05a225171a531aea96126f62bfba4b201e3748c68c700fa0qlc7l_73f91574-d20e-4a67-98a6-ba1841b5e35f/extract/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.024311 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-utilities/0.log" Nov 11 
15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.228370 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-content/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.228462 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-content/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.236643 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-utilities/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.374828 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-utilities/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.467493 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/extract-content/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.595391 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-utilities/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.752881 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-utilities/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.796776 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-content/0.log" Nov 11 15:18:32 crc kubenswrapper[4842]: I1111 15:18:32.813452 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-content/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.008276 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-utilities/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.054914 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wzjr7_f0671ff4-7fd5-4886-82ed-cd0ca45f39d1/registry-server/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.089661 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/extract-content/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.247988 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-94sst_46efe664-2d21-4657-b466-579abe4f7f02/marketplace-operator/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.490938 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-utilities/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.660532 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-utilities/0.log" Nov 11 
15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.708650 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-content/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.792302 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-content/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.916942 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-9lcrs_7c85f60b-9964-4e28-a20c-bc21b4bc9680/registry-server/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.935383 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-utilities/0.log" Nov 11 15:18:33 crc kubenswrapper[4842]: I1111 15:18:33.973952 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/extract-content/0.log" Nov 11 15:18:34 crc kubenswrapper[4842]: I1111 15:18:34.153246 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-utilities/0.log" Nov 11 15:18:34 crc kubenswrapper[4842]: I1111 15:18:34.205028 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fhxng_1110211a-8e95-489f-ab1c-f13f4ca75b79/registry-server/0.log" Nov 11 15:18:34 crc kubenswrapper[4842]: I1111 15:18:34.350287 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-content/0.log" Nov 11 15:18:34 crc kubenswrapper[4842]: I1111 15:18:34.367421 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-utilities/0.log" Nov 11 15:18:34 crc kubenswrapper[4842]: I1111 15:18:34.370013 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-content/0.log" Nov 11 15:18:34 crc kubenswrapper[4842]: I1111 15:18:34.543877 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-content/0.log" Nov 11 15:18:34 crc kubenswrapper[4842]: I1111 15:18:34.543946 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/extract-utilities/0.log" Nov 11 15:18:35 crc kubenswrapper[4842]: I1111 15:18:35.174856 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6wlm5_ea619a4b-ce46-4a6d-b08c-7d5d3ddb10d3/registry-server/0.log" Nov 11 15:18:45 crc kubenswrapper[4842]: I1111 15:18:45.910895 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-7c8cf85677-5lnv2_36861a8f-d7ae-47db-b504-1eb8a1694af7/prometheus-operator/0.log" Nov 11 15:18:46 crc kubenswrapper[4842]: I1111 15:18:46.080323 4842 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6b97475496-gxfm8_688f6c76-6b40-4937-9040-6fc178c7740d/prometheus-operator-admission-webhook/0.log" Nov 11 15:18:46 crc kubenswrapper[4842]: I1111 15:18:46.106607 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6b97475496-6sfgg_dc69b653-7e30-40ed-995a-bd2ca759365c/prometheus-operator-admission-webhook/0.log" Nov 11 15:18:46 crc kubenswrapper[4842]: I1111 15:18:46.331361 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-cc5f78dfc-8ckgj_ad3cf656-3491-4507-bd22-df41ef4576d8/operator/0.log" Nov 11 15:18:46 crc kubenswrapper[4842]: I1111 15:18:46.343612 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-54bc95c9fb-hw9wl_582cbdc6-be31-4fde-904d-820ea6228929/perses-operator/0.log" Nov 11 15:18:56 crc kubenswrapper[4842]: E1111 15:18:56.724418 4842 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.155:52696->38.102.83.155:44429: write tcp 38.102.83.155:52696->38.102.83.155:44429: write: broken pipe Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.486127 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9hx6n"] Nov 11 15:18:58 crc kubenswrapper[4842]: E1111 15:18:58.486957 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f529955-0fd0-4ee6-9d6e-435b7528fd28" containerName="container-00" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.486975 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f529955-0fd0-4ee6-9d6e-435b7528fd28" containerName="container-00" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.487272 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f529955-0fd0-4ee6-9d6e-435b7528fd28" containerName="container-00" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.491772 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.501175 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9hx6n"] Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.512794 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-catalog-content\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.512841 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-utilities\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.512875 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmrdn\" (UniqueName: \"kubernetes.io/projected/f3353e63-d62e-4b5a-9d6c-18156891953a-kube-api-access-dmrdn\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.614015 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-catalog-content\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.614114 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-utilities\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.614181 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmrdn\" (UniqueName: \"kubernetes.io/projected/f3353e63-d62e-4b5a-9d6c-18156891953a-kube-api-access-dmrdn\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.614518 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-catalog-content\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.614560 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-utilities\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.652475 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dmrdn\" (UniqueName: \"kubernetes.io/projected/f3353e63-d62e-4b5a-9d6c-18156891953a-kube-api-access-dmrdn\") pod \"redhat-operators-9hx6n\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:58 crc kubenswrapper[4842]: I1111 15:18:58.811742 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:18:59 crc kubenswrapper[4842]: I1111 15:18:59.453481 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9hx6n"] Nov 11 15:18:59 crc kubenswrapper[4842]: I1111 15:18:59.670533 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerStarted","Data":"a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802"} Nov 11 15:18:59 crc kubenswrapper[4842]: I1111 15:18:59.670962 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerStarted","Data":"041b8911e63a17c8f3e104f80e803e3fd6efc5f0c4f79e4509b08f18a3623aed"} Nov 11 15:19:00 crc kubenswrapper[4842]: I1111 15:19:00.681153 4842 generic.go:334] "Generic (PLEG): container finished" podID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerID="a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802" exitCode=0 Nov 11 15:19:00 crc kubenswrapper[4842]: I1111 15:19:00.681230 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerDied","Data":"a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802"} Nov 11 15:19:00 crc kubenswrapper[4842]: I1111 15:19:00.683194 4842 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 11 15:19:01 crc kubenswrapper[4842]: I1111 15:19:01.695161 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerStarted","Data":"e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58"} Nov 11 15:19:06 crc kubenswrapper[4842]: I1111 15:19:06.745620 4842 generic.go:334] "Generic (PLEG): container finished" podID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerID="e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58" exitCode=0 Nov 11 15:19:06 crc kubenswrapper[4842]: I1111 15:19:06.745707 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerDied","Data":"e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58"} Nov 11 15:19:07 crc kubenswrapper[4842]: I1111 15:19:07.757761 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerStarted","Data":"9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7"} Nov 11 15:19:07 crc kubenswrapper[4842]: I1111 15:19:07.777297 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9hx6n" podStartSLOduration=3.083917904 podStartE2EDuration="9.777280613s" podCreationTimestamp="2025-11-11 15:18:58 +0000 UTC" firstStartedPulling="2025-11-11 
15:19:00.682915648 +0000 UTC m=+6551.343205267" lastFinishedPulling="2025-11-11 15:19:07.376278357 +0000 UTC m=+6558.036567976" observedRunningTime="2025-11-11 15:19:07.772527994 +0000 UTC m=+6558.432817653" watchObservedRunningTime="2025-11-11 15:19:07.777280613 +0000 UTC m=+6558.437570232" Nov 11 15:19:08 crc kubenswrapper[4842]: I1111 15:19:08.812116 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:19:08 crc kubenswrapper[4842]: I1111 15:19:08.812483 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:19:09 crc kubenswrapper[4842]: I1111 15:19:09.872724 4842 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hx6n" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="registry-server" probeResult="failure" output=< Nov 11 15:19:09 crc kubenswrapper[4842]: timeout: failed to connect service ":50051" within 1s Nov 11 15:19:09 crc kubenswrapper[4842]: > Nov 11 15:19:18 crc kubenswrapper[4842]: I1111 15:19:18.872445 4842 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:19:18 crc kubenswrapper[4842]: I1111 15:19:18.926767 4842 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:19:19 crc kubenswrapper[4842]: I1111 15:19:19.109264 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9hx6n"] Nov 11 15:19:20 crc kubenswrapper[4842]: I1111 15:19:20.889159 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9hx6n" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="registry-server" containerID="cri-o://9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7" gracePeriod=2 Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.360299 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.509304 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-catalog-content\") pod \"f3353e63-d62e-4b5a-9d6c-18156891953a\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.509405 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-utilities\") pod \"f3353e63-d62e-4b5a-9d6c-18156891953a\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.509495 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmrdn\" (UniqueName: \"kubernetes.io/projected/f3353e63-d62e-4b5a-9d6c-18156891953a-kube-api-access-dmrdn\") pod \"f3353e63-d62e-4b5a-9d6c-18156891953a\" (UID: \"f3353e63-d62e-4b5a-9d6c-18156891953a\") " Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.510127 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-utilities" (OuterVolumeSpecName: "utilities") pod "f3353e63-d62e-4b5a-9d6c-18156891953a" (UID: "f3353e63-d62e-4b5a-9d6c-18156891953a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.515999 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3353e63-d62e-4b5a-9d6c-18156891953a-kube-api-access-dmrdn" (OuterVolumeSpecName: "kube-api-access-dmrdn") pod "f3353e63-d62e-4b5a-9d6c-18156891953a" (UID: "f3353e63-d62e-4b5a-9d6c-18156891953a"). InnerVolumeSpecName "kube-api-access-dmrdn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.597452 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f3353e63-d62e-4b5a-9d6c-18156891953a" (UID: "f3353e63-d62e-4b5a-9d6c-18156891953a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.611721 4842 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.612028 4842 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3353e63-d62e-4b5a-9d6c-18156891953a-utilities\") on node \"crc\" DevicePath \"\"" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.612043 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmrdn\" (UniqueName: \"kubernetes.io/projected/f3353e63-d62e-4b5a-9d6c-18156891953a-kube-api-access-dmrdn\") on node \"crc\" DevicePath \"\"" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.911388 4842 generic.go:334] "Generic (PLEG): container finished" podID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerID="9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7" exitCode=0 Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.911503 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerDied","Data":"9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7"} Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.911567 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hx6n" event={"ID":"f3353e63-d62e-4b5a-9d6c-18156891953a","Type":"ContainerDied","Data":"041b8911e63a17c8f3e104f80e803e3fd6efc5f0c4f79e4509b08f18a3623aed"} Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.911608 4842 scope.go:117] "RemoveContainer" containerID="9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.911906 4842 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9hx6n" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.946817 4842 scope.go:117] "RemoveContainer" containerID="e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.992237 4842 scope.go:117] "RemoveContainer" containerID="a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802" Nov 11 15:19:21 crc kubenswrapper[4842]: I1111 15:19:21.999356 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9hx6n"] Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.018621 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9hx6n"] Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.036957 4842 scope.go:117] "RemoveContainer" containerID="9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7" Nov 11 15:19:22 crc kubenswrapper[4842]: E1111 15:19:22.037589 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7\": container with ID starting with 9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7 not found: ID does not exist" containerID="9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7" Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.037647 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7"} err="failed to get container status \"9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7\": rpc error: code = NotFound desc = could not find container \"9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7\": container with ID starting with 9fe1a154803a932072bedddc9299715ab9ba6ec5156230918b46cf371c1bc6b7 not found: ID does not exist" Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.037684 4842 scope.go:117] "RemoveContainer" containerID="e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58" Nov 11 15:19:22 crc kubenswrapper[4842]: E1111 15:19:22.039697 4842 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58\": container with ID starting with e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58 not found: ID does not exist" containerID="e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58" Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.039737 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58"} err="failed to get container status \"e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58\": rpc error: code = NotFound desc = could not find container \"e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58\": container with ID starting with e1b33f849b1bdbc3e3b3d81e49f4a8682ff0ef3ca70923359d3850b0409f2f58 not found: ID does not exist" Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.039764 4842 scope.go:117] "RemoveContainer" containerID="a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802" Nov 11 15:19:22 crc kubenswrapper[4842]: E1111 15:19:22.040435 4842 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802\": container with ID starting with a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802 not found: ID does not exist" containerID="a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802" Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.040463 4842 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802"} err="failed to get container status \"a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802\": rpc error: code = NotFound desc = could not find container \"a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802\": container with ID starting with a94849da9fdc2fca44d60901511ea4623dcb1efb7e52a1d3dc6fc91dde2c6802 not found: ID does not exist" Nov 11 15:19:22 crc kubenswrapper[4842]: I1111 15:19:22.074389 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" path="/var/lib/kubelet/pods/f3353e63-d62e-4b5a-9d6c-18156891953a/volumes" Nov 11 15:20:44 crc kubenswrapper[4842]: I1111 15:20:44.961170 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:20:44 crc kubenswrapper[4842]: I1111 15:20:44.961834 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:20:46 crc kubenswrapper[4842]: I1111 15:20:46.820079 4842 generic.go:334] "Generic (PLEG): container finished" podID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerID="2e83745d26468a39f369ed9e07fa478613aa271fd12aaf6a9acf31d98daa9f57" exitCode=0 Nov 11 15:20:46 crc kubenswrapper[4842]: I1111 15:20:46.820128 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jmq82/must-gather-44lgc" event={"ID":"3aacdbf3-901a-4354-b448-30ef13002a5a","Type":"ContainerDied","Data":"2e83745d26468a39f369ed9e07fa478613aa271fd12aaf6a9acf31d98daa9f57"} Nov 11 15:20:46 crc kubenswrapper[4842]: I1111 15:20:46.821201 4842 scope.go:117] "RemoveContainer" containerID="2e83745d26468a39f369ed9e07fa478613aa271fd12aaf6a9acf31d98daa9f57" Nov 11 15:20:47 crc kubenswrapper[4842]: I1111 15:20:47.020602 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jmq82_must-gather-44lgc_3aacdbf3-901a-4354-b448-30ef13002a5a/gather/0.log" Nov 11 15:20:57 crc kubenswrapper[4842]: I1111 15:20:57.732440 4842 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jmq82/must-gather-44lgc"] Nov 11 15:20:57 crc kubenswrapper[4842]: I1111 15:20:57.733608 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-jmq82/must-gather-44lgc" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerName="copy" containerID="cri-o://a77e7b1118fffdf995aa5b4fc5650cd9335f69fa532b74827d447c1e0353ca22" gracePeriod=2 Nov 11 15:20:57 crc kubenswrapper[4842]: I1111 15:20:57.741715 4842 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-must-gather-jmq82/must-gather-44lgc"] Nov 11 15:20:57 crc kubenswrapper[4842]: I1111 15:20:57.927385 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jmq82_must-gather-44lgc_3aacdbf3-901a-4354-b448-30ef13002a5a/copy/0.log" Nov 11 15:20:57 crc kubenswrapper[4842]: I1111 15:20:57.927814 4842 generic.go:334] "Generic (PLEG): container finished" podID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerID="a77e7b1118fffdf995aa5b4fc5650cd9335f69fa532b74827d447c1e0353ca22" exitCode=143 Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.263784 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jmq82_must-gather-44lgc_3aacdbf3-901a-4354-b448-30ef13002a5a/copy/0.log" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.264594 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.355497 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3aacdbf3-901a-4354-b448-30ef13002a5a-must-gather-output\") pod \"3aacdbf3-901a-4354-b448-30ef13002a5a\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.355765 4842 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwphp\" (UniqueName: \"kubernetes.io/projected/3aacdbf3-901a-4354-b448-30ef13002a5a-kube-api-access-lwphp\") pod \"3aacdbf3-901a-4354-b448-30ef13002a5a\" (UID: \"3aacdbf3-901a-4354-b448-30ef13002a5a\") " Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.373339 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3aacdbf3-901a-4354-b448-30ef13002a5a-kube-api-access-lwphp" (OuterVolumeSpecName: "kube-api-access-lwphp") pod "3aacdbf3-901a-4354-b448-30ef13002a5a" (UID: "3aacdbf3-901a-4354-b448-30ef13002a5a"). InnerVolumeSpecName "kube-api-access-lwphp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.458672 4842 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwphp\" (UniqueName: \"kubernetes.io/projected/3aacdbf3-901a-4354-b448-30ef13002a5a-kube-api-access-lwphp\") on node \"crc\" DevicePath \"\"" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.548405 4842 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3aacdbf3-901a-4354-b448-30ef13002a5a-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "3aacdbf3-901a-4354-b448-30ef13002a5a" (UID: "3aacdbf3-901a-4354-b448-30ef13002a5a"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.560401 4842 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3aacdbf3-901a-4354-b448-30ef13002a5a-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.942824 4842 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jmq82_must-gather-44lgc_3aacdbf3-901a-4354-b448-30ef13002a5a/copy/0.log" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.943684 4842 scope.go:117] "RemoveContainer" containerID="a77e7b1118fffdf995aa5b4fc5650cd9335f69fa532b74827d447c1e0353ca22" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.943723 4842 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jmq82/must-gather-44lgc" Nov 11 15:20:58 crc kubenswrapper[4842]: I1111 15:20:58.994869 4842 scope.go:117] "RemoveContainer" containerID="2e83745d26468a39f369ed9e07fa478613aa271fd12aaf6a9acf31d98daa9f57" Nov 11 15:21:00 crc kubenswrapper[4842]: I1111 15:21:00.071824 4842 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" path="/var/lib/kubelet/pods/3aacdbf3-901a-4354-b448-30ef13002a5a/volumes" Nov 11 15:21:14 crc kubenswrapper[4842]: I1111 15:21:14.961580 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:21:14 crc kubenswrapper[4842]: I1111 15:21:14.964294 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:21:44 crc kubenswrapper[4842]: I1111 15:21:44.961017 4842 patch_prober.go:28] interesting pod/machine-config-daemon-k84vc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 11 15:21:44 crc kubenswrapper[4842]: I1111 15:21:44.961779 4842 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 11 15:21:44 crc kubenswrapper[4842]: I1111 15:21:44.961850 4842 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" Nov 11 15:21:44 crc kubenswrapper[4842]: I1111 15:21:44.963364 4842 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0266b7209db5a0aa8266f36f3986eba5be4a04fe36df4e571e258eb56064302e"} pod="openshift-machine-config-operator/machine-config-daemon-k84vc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 11 15:21:44 crc kubenswrapper[4842]: I1111 
15:21:44.963527 4842 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" containerName="machine-config-daemon" containerID="cri-o://0266b7209db5a0aa8266f36f3986eba5be4a04fe36df4e571e258eb56064302e" gracePeriod=600 Nov 11 15:21:45 crc kubenswrapper[4842]: E1111 15:21:45.093550 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:21:45 crc kubenswrapper[4842]: I1111 15:21:45.411651 4842 generic.go:334] "Generic (PLEG): container finished" podID="9f3edace-782c-4646-8a57-d39d8373bb14" containerID="0266b7209db5a0aa8266f36f3986eba5be4a04fe36df4e571e258eb56064302e" exitCode=0 Nov 11 15:21:45 crc kubenswrapper[4842]: I1111 15:21:45.411701 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" event={"ID":"9f3edace-782c-4646-8a57-d39d8373bb14","Type":"ContainerDied","Data":"0266b7209db5a0aa8266f36f3986eba5be4a04fe36df4e571e258eb56064302e"} Nov 11 15:21:45 crc kubenswrapper[4842]: I1111 15:21:45.411734 4842 scope.go:117] "RemoveContainer" containerID="d8fe6810803ceedfe2ffaf2665203a932232f9ca269054805c23df532622e6d1" Nov 11 15:21:45 crc kubenswrapper[4842]: I1111 15:21:45.412746 4842 scope.go:117] "RemoveContainer" containerID="0266b7209db5a0aa8266f36f3986eba5be4a04fe36df4e571e258eb56064302e" Nov 11 15:21:45 crc kubenswrapper[4842]: E1111 15:21:45.413227 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.080255 4842 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pmwv9"] Nov 11 15:21:49 crc kubenswrapper[4842]: E1111 15:21:49.081251 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="registry-server" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081266 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="registry-server" Nov 11 15:21:49 crc kubenswrapper[4842]: E1111 15:21:49.081281 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerName="copy" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081289 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerName="copy" Nov 11 15:21:49 crc kubenswrapper[4842]: E1111 15:21:49.081307 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="extract-content" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081313 4842 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="extract-content" Nov 11 15:21:49 crc kubenswrapper[4842]: E1111 15:21:49.081336 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="extract-utilities" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081342 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="extract-utilities" Nov 11 15:21:49 crc kubenswrapper[4842]: E1111 15:21:49.081356 4842 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerName="gather" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081361 4842 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerName="gather" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081561 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerName="copy" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081579 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3353e63-d62e-4b5a-9d6c-18156891953a" containerName="registry-server" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.081595 4842 memory_manager.go:354] "RemoveStaleState removing state" podUID="3aacdbf3-901a-4354-b448-30ef13002a5a" containerName="gather" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.083324 4842 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.103019 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pmwv9"] Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.135022 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4snk\" (UniqueName: \"kubernetes.io/projected/184a6472-bcb5-4779-9866-b81af4329def-kube-api-access-m4snk\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.135066 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/184a6472-bcb5-4779-9866-b81af4329def-utilities\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.135378 4842 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/184a6472-bcb5-4779-9866-b81af4329def-catalog-content\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.236999 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/184a6472-bcb5-4779-9866-b81af4329def-catalog-content\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.237176 4842 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-m4snk\" (UniqueName: \"kubernetes.io/projected/184a6472-bcb5-4779-9866-b81af4329def-kube-api-access-m4snk\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.237208 4842 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/184a6472-bcb5-4779-9866-b81af4329def-utilities\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.237731 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/184a6472-bcb5-4779-9866-b81af4329def-utilities\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.237998 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/184a6472-bcb5-4779-9866-b81af4329def-catalog-content\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.275702 4842 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4snk\" (UniqueName: \"kubernetes.io/projected/184a6472-bcb5-4779-9866-b81af4329def-kube-api-access-m4snk\") pod \"certified-operators-pmwv9\" (UID: \"184a6472-bcb5-4779-9866-b81af4329def\") " pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.447611 4842 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pmwv9" Nov 11 15:21:49 crc kubenswrapper[4842]: I1111 15:21:49.920747 4842 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pmwv9"] Nov 11 15:21:50 crc kubenswrapper[4842]: I1111 15:21:50.458143 4842 generic.go:334] "Generic (PLEG): container finished" podID="184a6472-bcb5-4779-9866-b81af4329def" containerID="6bae9213a46ab8b7757f5314c270b024759694b3940453db027d24fe05fe50c6" exitCode=0 Nov 11 15:21:50 crc kubenswrapper[4842]: I1111 15:21:50.458190 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmwv9" event={"ID":"184a6472-bcb5-4779-9866-b81af4329def","Type":"ContainerDied","Data":"6bae9213a46ab8b7757f5314c270b024759694b3940453db027d24fe05fe50c6"} Nov 11 15:21:50 crc kubenswrapper[4842]: I1111 15:21:50.458214 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmwv9" event={"ID":"184a6472-bcb5-4779-9866-b81af4329def","Type":"ContainerStarted","Data":"af8540106d0710ac9c61199978c4cf70e91bfe0962b8a4ec6780a39bce6903c2"} Nov 11 15:21:51 crc kubenswrapper[4842]: I1111 15:21:51.471406 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmwv9" event={"ID":"184a6472-bcb5-4779-9866-b81af4329def","Type":"ContainerStarted","Data":"d6ef08441946d787f23ff46ce3dfa1299e45624603a44b3d84df380212a01d4a"} Nov 11 15:21:52 crc kubenswrapper[4842]: I1111 15:21:52.482344 4842 generic.go:334] "Generic (PLEG): container finished" podID="184a6472-bcb5-4779-9866-b81af4329def" containerID="d6ef08441946d787f23ff46ce3dfa1299e45624603a44b3d84df380212a01d4a" exitCode=0 Nov 11 15:21:52 crc kubenswrapper[4842]: I1111 15:21:52.482441 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmwv9" event={"ID":"184a6472-bcb5-4779-9866-b81af4329def","Type":"ContainerDied","Data":"d6ef08441946d787f23ff46ce3dfa1299e45624603a44b3d84df380212a01d4a"} Nov 11 15:21:53 crc kubenswrapper[4842]: I1111 15:21:53.497704 4842 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pmwv9" event={"ID":"184a6472-bcb5-4779-9866-b81af4329def","Type":"ContainerStarted","Data":"541cb7f947f22411c22b17026df3c7b41c6490add7a27625d14ba7bf8c0c6c2f"} Nov 11 15:21:53 crc kubenswrapper[4842]: I1111 15:21:53.514925 4842 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pmwv9" podStartSLOduration=1.755093472 podStartE2EDuration="4.514903362s" podCreationTimestamp="2025-11-11 15:21:49 +0000 UTC" firstStartedPulling="2025-11-11 15:21:50.459972524 +0000 UTC m=+6721.120262143" lastFinishedPulling="2025-11-11 15:21:53.219782414 +0000 UTC m=+6723.880072033" observedRunningTime="2025-11-11 15:21:53.51291019 +0000 UTC m=+6724.173199809" watchObservedRunningTime="2025-11-11 15:21:53.514903362 +0000 UTC m=+6724.175193001" Nov 11 15:21:57 crc kubenswrapper[4842]: I1111 15:21:57.060338 4842 scope.go:117] "RemoveContainer" containerID="0266b7209db5a0aa8266f36f3986eba5be4a04fe36df4e571e258eb56064302e" Nov 11 15:21:57 crc kubenswrapper[4842]: E1111 15:21:57.062742 4842 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-k84vc_openshift-machine-config-operator(9f3edace-782c-4646-8a57-d39d8373bb14)\"" pod="openshift-machine-config-operator/machine-config-daemon-k84vc" podUID="9f3edace-782c-4646-8a57-d39d8373bb14"